From c940cc0c8d453f4bec782e6fbb02b50ba638467b Mon Sep 17 00:00:00 2001
From: r-caamano
Date: Fri, 31 May 2024 03:26:38 +0000
Subject: [PATCH] Added support for tunneling L2tpV3 over ziti-edge-tunnel.
 Fixed README formatting issue

---
 CHANGELOG.md                | 14 ++++++++++++
 README.md                   | 14 +++++++++++-
 src/zfw.c                   |  2 +-
 src/zfw_tc_outbound_track.c | 43 +++++++++++++++++++++++++++++++++++--
 4 files changed, 69 insertions(+), 4 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6514110..182970d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,20 @@ All notable changes to this project will be documented in this file. The format
 is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres
 to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 ---
+# [0.7.3] - 2024-05-30
+
+###
+
+-- Added support for L2tpV3 over ziti with l2tp tunnel terminating on the same vm as ziti-edge-tunnel.
+   In order to support this a unique ZITI_DNS_IP_RANGE must be set on both vms terminating l2tpv3. The
+   source of the L2tpv3 tunnel on each zet host needs to be set to the ip address assigned to the ziti0
+   interface which will be the first host address in the ZITI_DNS_IP_RANGE. In addition you will need to enable
+   ebpf outbound tracking on the loopback interface. This can be setup via /opt/openziti/etc/ebpf_config.json i.e.
+   ```
+   {"InternalInterfaces":[{"Name":"eth0", "OutboundPassThroughTrack": false, "PerInterfaceRules": false}, {"Name":"lo", "OutboundPassThroughTrack": true}],"ExternalInterfaces":[]}
+   ```
+-- Fixed Readme.md formatting issue introduced in 0.7.2
+
 # [0.7.2] - 2024-05-28
 
 ###
diff --git a/README.md b/README.md
index 958026b..f5f14cb 100644
--- a/README.md
+++ b/README.md
@@ -232,6 +232,17 @@ sudo systemctl restart ziti-router.service
 ```
 
+### Ziti Edge Tunnel L2tp Tunnel over ziti (zfw-tunnel only)
+
+To support L2tpV3 over ziti with l2tp tunnel terminating on the same vm as ziti-edge-tunnel.
+In order to support this a unique ZITI_DNS_IP_RANGE must be set on both vms terminating l2tpv3. The
+source of the L2tpv3 tunnel on each zet host needs to be set to the ip address assigned to the ziti0
+interface which will be the first host address in the ZITI_DNS_IP_RANGE. In addition you will need to enable
+ebpf outbound tracking on the loopback interface. This can be setup via /opt/openziti/etc/ebpf_config.json i.e.
+```
+{"InternalInterfaces":[{"Name":"eth0", "OutboundPassThroughTrack": false, "PerInterfaceRules": false}, {"Name":"lo", "OutboundPassThroughTrack": true}],"ExternalInterfaces":[]}
+```
+
 ### Ziti Edge Tunnel Bidirectional Transparency (zfw-tunnel only)
 
 In order to allow internal tunneler connections over ziti the default operation has been set to not delete any tunX link routes. This will disable the ability to support transparency. There is an environmental variable ```TRANSPARENT_MODE='true'``` that can be set in the ```/opt/openziti/etc/ziti-edge-tunnel.env``` file to enable deletion of tunX routes if bi-directional transparency is required at the expense of disabling internal tunneler interception.
@@ -282,7 +293,8 @@ sudo reboot
    0000000000000000000000 tcp 0.0.0.0/0 100.64.0.0/10 dpts=5201:5201 TUNMODE redirect:ziti0 []
    0000000000000000000000 udp 0.0.0.0/0 100.64.0.0/10 dpts=5201:5201 TUNMODE redirect:ziti0 []
-   IMPORTANT: These entries will remain until as long as there is at least one wildcard in a service using the port/port range via cli and will not be removed by ziti service deletion. It is recommended to use single ports with wild card since the low port acts as a key and thus the first service that gets entered will dictate the range for the ports and there is only one prefix.
+   IMPORTANT: These entries will remain as long as there is at least one wildcard in a service using the port/port range via cli and will not be removed by ziti service deletion.
+   It is recommended to use single ports with wild card since the low port acts as a key and thus the first service that gets entered will dictate the range for the ports and there is only one prefix.
 
 ```
 ## Ebpf Map User Space Management
---
diff --git a/src/zfw.c b/src/zfw.c
index cbfcca0..7f732eb 100644
--- a/src/zfw.c
+++ b/src/zfw.c
@@ -182,7 +182,7 @@ char *log_file_name;
 char *object_file;
 char *direction_string;
 
-const char *argp_program_version = "0.7.2";
+const char *argp_program_version = "0.7.3";
 struct ring_buffer *ring_buffer;
 
 __u32 if_list[MAX_IF_LIST_ENTRIES];
diff --git a/src/zfw_tc_outbound_track.c b/src/zfw_tc_outbound_track.c
index 34f11b4..70c8174 100644
--- a/src/zfw_tc_outbound_track.c
+++ b/src/zfw_tc_outbound_track.c
@@ -102,6 +102,25 @@ struct diag_ip4 {
     bool ddos_filtering;
 };
 
+/*value to ifindex_tun_map*/
+struct ifindex_tun {
+    uint32_t index;
+    char ifname[IFNAMSIZ];
+    char cidr[16];
+    uint32_t resolver;
+    char mask[3];
+    bool verbose;
+};
+
+/*tun ifindex map*/
+struct {
+    __uint(type, BPF_MAP_TYPE_ARRAY);
+    __uint(key_size, sizeof(uint32_t));
+    __uint(value_size, sizeof(struct ifindex_tun));
+    __uint(max_entries, 1);
+    __uint(pinning, LIBBPF_PIN_BY_NAME);
+} ifindex_tun_map SEC(".maps");
+
 //map to keep status of diagnostic rules
 struct {
     __uint(type, BPF_MAP_TYPE_HASH);
@@ -137,6 +156,13 @@ struct {
     __uint(pinning, LIBBPF_PIN_BY_NAME);
 } rb_map SEC(".maps");
 
+/*get entry from tun ifindex map*/
+static inline struct ifindex_tun *get_tun_index(uint32_t key){
+    struct ifindex_tun *iftun;
+    iftun = bpf_map_lookup_elem(&ifindex_tun_map, &key);
+    return iftun;
+}
+
 /*Insert entry into tcp state table*/
 static inline void insert_tcp(struct tcp_state tstate, struct tuple_key key){
     bpf_map_update_elem(&tcp_map, &key, &tstate,0);
@@ -303,6 +329,9 @@ int bpf_sk_splice(struct __sk_buff *skb){
         return TC_ACT_OK;
     }
 
+    /*get entry from tun ifindex map*/
+    struct ifindex_tun *tun_if = get_tun_index(0);
+
     /* find ethernet header from skb->data pointer */
     struct ethhdr *eth = (struct ethhdr *)(unsigned long)(skb->data);
     /* verify its a valid eth header within the packet bounds */
@@ -327,11 +356,21 @@ int bpf_sk_splice(struct __sk_buff *skb){
     event.daddr = tuple->ipv4.daddr;
     event.sport = tuple->ipv4.sport;
     event.dport = tuple->ipv4.dport;
-
+    /*if packet egressing on loopback interface and its source is the ziti0 ip address
+     *redirect the packet to the ziti0 interface. Added to provide support for L2tpV3
+     *over openziti with ziti-edge-tunnel
+     */
+    if((skb->ifindex == 1) && tun_if && tun_if->resolver){
+        uint32_t tun_ip = bpf_ntohl(tun_if->resolver) - 1;
+        if(tuple->ipv4.saddr == bpf_htonl(tun_ip)){
+            return bpf_redirect(tun_if->index, 0);
+        }
+    }
     /* if tcp based tuple implement stateful inspection to see if they were
      * initiated by the local OS if not then its passthrough traffic and so wee need to
      * setup our own state to track the outbound pass through connections in via shared hashmap
-     * with with ingress tc program*/
+     * with the ingress tc program
+     */
     if(tcp){
         event.proto = IPPROTO_TCP;
         struct iphdr *iph = (struct iphdr *)(skb->data + sizeof(*eth));