Skip to content

Commit

Permalink
Added IPv4 masquerade support
Browse files Browse the repository at this point in the history
  • Loading branch information
r-caamano committed Aug 1, 2024
1 parent 4879b2f commit 59f37cd
Show file tree
Hide file tree
Showing 4 changed files with 189 additions and 13 deletions.
2 changes: 1 addition & 1 deletion CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ All notable changes to this project will be documented in this file. The format
# [0.8.10] - 2024-07-29

- Updated start_ebpf_controller.py to only clear ingress filters on restart and also removed ```-r, --route``` from the flush.
- Added native masquerade for IPv6 passthrough
- Added native masquerade for IPv4/IPv6 passthrough connections.

###
# [0.8.9] - 2024-07-28
Expand Down
6 changes: 4 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -9,12 +9,14 @@ edge-routers.

## New features in 0.8.x -

### Native ebpf based IPv6 Masquerade support
### Native eBPF-based IPv4 and IPv6 Masquerade support

zfw can now provide native IPv6 masquerade which can be enabled via
zfw can now provide native IPv4/IPv6 masquerade operation for outbound pass-through connections, which can be enabled via:

```sudo zfw -k, --masquerade <ifname>```

This function requires that both ingress and egress TC filters are enabled on the outbound interface.

### Explicit Deny Rules
This feature adds the ability to enter explicit deny rules by appending ```-d, --disable``` to the ```-I, --insert``` rule for both ingress and egress rules. Rule precedence is based on longest match prefix. If the prefix is the same, then precedence follows the order of entry of the rules, which when listed will go from top to bottom for ports within the same prefix e.g.

Expand Down
94 changes: 94 additions & 0 deletions src/zfw_tc_ingress.c
Original file line number Diff line number Diff line change
Expand Up @@ -1405,6 +1405,54 @@ int bpf_sk_splice(struct __sk_buff *skb){
}
}
}
if(local_diag->masquerade && local_ip4 && local_ip4->count && (local_ip4->ipaddr[0] == tuple->ipv4.daddr)){
struct masq_key mk = {0};
mk.dport = tuple->ipv4.sport;
mk.sport = tuple->ipv4.dport;
mk.ifindex = skb->ifindex;
mk.protocol = IPPROTO_TCP;
struct masq_value *mv = get_masquerade(mk);
if(mv){
__u32 l3_sum = bpf_csum_diff((__u32 *)&iph->daddr, sizeof(iph->daddr),(__u32 *)&mv->__in46_u_origin.ip, sizeof(mv->__in46_u_origin.ip), 0);
iph->daddr = mv->__in46_u_origin.ip;
/*Calculate l3 Checksum*/
bpf_l3_csum_replace(skb, sizeof(struct ethhdr) + offsetof(struct iphdr, check), 0, l3_sum, 0);
iph = (struct iphdr *)(skb->data + sizeof(*eth));
if ((unsigned long)(iph + 1) > (unsigned long)skb->data_end){
return TC_ACT_SHOT;
}
tuple = (struct bpf_sock_tuple *)(void*)(long)&iph->saddr;
if(!tuple){
return TC_ACT_SHOT;
}
tuple_len = sizeof(tuple->ipv4);
if ((unsigned long)tuple + tuple_len > (unsigned long)skb->data_end){
return TC_ACT_SHOT;
}
tcph = (struct tcphdr *)((unsigned long)iph + sizeof(*iph));
if ((unsigned long)(tcph + 1) > (unsigned long)skb->data_end){
return TC_ACT_SHOT;
}
/*Calculate l4 Checksum*/
bpf_l4_csum_replace(skb, sizeof(struct ethhdr) + sizeof(struct iphdr) + offsetof(struct tcphdr, check),local_ip4->ipaddr[0], iph->daddr, BPF_F_PSEUDO_HDR | 4);
iph = (struct iphdr *)(skb->data + sizeof(*eth));
if ((unsigned long)(iph + 1) > (unsigned long)skb->data_end){
return TC_ACT_SHOT;
}
tuple = (struct bpf_sock_tuple *)(void*)(long)&iph->saddr;
if(!tuple){
return TC_ACT_SHOT;
}
tuple_len = sizeof(tuple->ipv4);
if ((unsigned long)tuple + tuple_len > (unsigned long)skb->data_end){
return TC_ACT_SHOT;
}
tcph = (struct tcphdr *)((unsigned long)iph + sizeof(*iph));
if ((unsigned long)(tcph + 1) > (unsigned long)skb->data_end){
return TC_ACT_SHOT;
}
}
}
tcp_state_key.__in46_u_dst.ip = tuple->ipv4.saddr;
tcp_state_key.__in46_u_src.ip = tuple->ipv4.daddr;
tcp_state_key.sport = tuple->ipv4.dport;
Expand Down Expand Up @@ -1501,6 +1549,50 @@ int bpf_sk_splice(struct __sk_buff *skb){
bpf_sk_release(sk);
/*reply to outbound passthrough check*/
}else{
if(local_diag->masquerade && local_ip4 && local_ip4->count && (local_ip4->ipaddr[0] == tuple->ipv4.daddr)){
struct masq_key mk = {0};
mk.dport = tuple->ipv4.sport;
mk.sport = tuple->ipv4.dport;
mk.ifindex = skb->ifindex;
mk.protocol = IPPROTO_UDP;
struct masq_value *mv = get_masquerade(mk);
if(mv){
struct iphdr *iph = (struct iphdr *)(skb->data + sizeof(*eth));
if ((unsigned long)(iph + 1) > (unsigned long)skb->data_end){
return TC_ACT_SHOT;
}
__u32 l3_sum = bpf_csum_diff((__u32 *)&iph->daddr, sizeof(iph->daddr),(__u32 *)&mv->__in46_u_origin.ip, sizeof(mv->__in46_u_origin.ip), 0);
iph->daddr = mv->__in46_u_origin.ip;
/*Calculate l3 Checksum*/
bpf_l3_csum_replace(skb, sizeof(struct ethhdr) + offsetof(struct iphdr, check), 0, l3_sum, 0);
iph = (struct iphdr *)(skb->data + sizeof(*eth));
if ((unsigned long)(iph + 1) > (unsigned long)skb->data_end){
return TC_ACT_SHOT;
}
tuple = (struct bpf_sock_tuple *)(void*)(long)&iph->saddr;
if(!tuple){
return TC_ACT_SHOT;
}
tuple_len = sizeof(tuple->ipv4);
if ((unsigned long)tuple + tuple_len > (unsigned long)skb->data_end){
return TC_ACT_SHOT;
}
/*Calculate l4 Checksum*/
bpf_l4_csum_replace(skb, sizeof(struct ethhdr) + sizeof(struct iphdr) + offsetof(struct udphdr, check),local_ip4->ipaddr[0], iph->daddr, BPF_F_PSEUDO_HDR | 4);
iph = (struct iphdr *)(skb->data + sizeof(*eth));
if ((unsigned long)(iph + 1) > (unsigned long)skb->data_end){
return TC_ACT_SHOT;
}
tuple = (struct bpf_sock_tuple *)(void*)(long)&iph->saddr;
if(!tuple){
return TC_ACT_SHOT;
}
tuple_len = sizeof(tuple->ipv4);
if ((unsigned long)tuple + tuple_len > (unsigned long)skb->data_end){
return TC_ACT_SHOT;
}
}
}
udp_state_key.__in46_u_dst.ip = tuple->ipv4.saddr;
udp_state_key.__in46_u_src.ip = tuple->ipv4.daddr;
udp_state_key.sport = tuple->ipv4.dport;
Expand Down Expand Up @@ -1613,6 +1705,7 @@ int bpf_sk_splice(struct __sk_buff *skb){
struct masq_value *mv = get_masquerade(mk);
if(mv){
memcpy(ip6h->daddr.in6_u.u6_addr32, mv->__in46_u_origin.ip6, sizeof(mv->__in46_u_origin.ip6));
/*Calculate l4 Checksum*/
for(int x = 0; x < 4; x++){
bpf_l4_csum_replace(skb, sizeof(struct ethhdr) + sizeof(struct ipv6hdr) + offsetof(struct tcphdr, check), local_ip6->ipaddr[0][x], ip6h->daddr.in6_u.u6_addr32[x], BPF_F_PSEUDO_HDR | 4);
ip6h = (struct ipv6hdr *)(skb->data + sizeof(*eth));
Expand Down Expand Up @@ -1740,6 +1833,7 @@ int bpf_sk_splice(struct __sk_buff *skb){
struct masq_value *mv = get_masquerade(mk);
if(mv){
memcpy(ip6h->daddr.in6_u.u6_addr32, mv->__in46_u_origin.ip6, sizeof(ip6h->daddr));
/*Calculate l4 Checksum*/
for(int x = 0; x < 4; x++){
bpf_l4_csum_replace(skb, sizeof(struct ethhdr) + sizeof(struct ipv6hdr) + offsetof(struct udphdr, check), local_ip6->ipaddr[0][x], ip6h->daddr.in6_u.u6_addr32[x], BPF_F_PSEUDO_HDR | 4);
ip6h = (struct ipv6hdr *)(skb->data + sizeof(*eth));
Expand Down
100 changes: 90 additions & 10 deletions src/zfw_tc_outbound_track.c
Original file line number Diff line number Diff line change
Expand Up @@ -949,8 +949,6 @@ int bpf_sk_splice(struct __sk_buff *skb){

if(tcp)
{


event.proto = IPPROTO_TCP;
struct tcphdr *tcph = (struct tcphdr *)((unsigned long)iph + sizeof(*iph));
if ((unsigned long)(tcph + 1) > (unsigned long)skb->data_end){
Expand Down Expand Up @@ -2095,6 +2093,54 @@ int bpf_sk_splice6(struct __sk_buff *skb){
tcp_state_key.__in46_u_dst.ip = tuple->ipv4.daddr;
tcp_state_key.sport = tuple->ipv4.sport;
tcp_state_key.dport = tuple->ipv4.dport;
if(local_diag->masquerade && local_ip4 && local_ip4->count){
__u32 l3_sum = bpf_csum_diff((__u32 *)&tuple->ipv4.saddr, sizeof(tuple->ipv4.saddr), (__u32 *)&local_ip4->ipaddr[0], sizeof(local_ip4->ipaddr[0]), 0);
struct masq_value mv = {0};
mv.__in46_u_origin.ip = tuple->ipv4.saddr;
struct masq_key mk = {0};
mk.dport = tuple->ipv4.dport;
mk.sport = tuple->ipv4.sport;
mk.ifindex = skb->ifindex;
mk.protocol = IPPROTO_TCP;
insert_masquerade(mv, mk);
iph->saddr = local_ip4->ipaddr[0];
/*Calculate l3 Checksum*/
bpf_l3_csum_replace(skb, sizeof(struct ethhdr) + offsetof(struct iphdr, check), 0, l3_sum, 0);
iph = (struct iphdr *)(skb->data + sizeof(*eth));
if ((unsigned long)(iph + 1) > (unsigned long)skb->data_end){
return TC_ACT_SHOT;
}
tuple = (struct bpf_sock_tuple *)(void*)(long)&iph->saddr;
if(!tuple){
return TC_ACT_SHOT;
}
tuple_len = sizeof(tuple->ipv4);
if ((unsigned long)tuple + tuple_len > (unsigned long)skb->data_end){
return TC_ACT_SHOT;
}
tcph = (struct tcphdr *)((unsigned long)iph + sizeof(*iph));
if ((unsigned long)(tcph + 1) > (unsigned long)skb->data_end){
return TC_ACT_SHOT;
}
/*Calculate l4 Checksum*/
bpf_l4_csum_replace(skb, sizeof(struct ethhdr) + sizeof(struct iphdr) + offsetof(struct tcphdr, check), mv.__in46_u_origin.ip, iph->saddr, BPF_F_PSEUDO_HDR | 4);
iph = (struct iphdr *)(skb->data + sizeof(*eth));
if ((unsigned long)(iph + 1) > (unsigned long)skb->data_end){
return TC_ACT_SHOT;
}
tuple = (struct bpf_sock_tuple *)(void*)(long)&iph->saddr;
if(!tuple){
return TC_ACT_SHOT;
}
tuple_len = sizeof(tuple->ipv4);
if ((unsigned long)tuple + tuple_len > (unsigned long)skb->data_end){
return TC_ACT_SHOT;
}
tcph = (struct tcphdr *)((unsigned long)iph + sizeof(*iph));
if ((unsigned long)(tcph + 1) > (unsigned long)skb->data_end){
return TC_ACT_SHOT;
}
}
unsigned long long tstamp = bpf_ktime_get_ns();
struct tcp_state *tstate;
if(tcph->syn && !tcph->ack){
Expand Down Expand Up @@ -2199,6 +2245,46 @@ int bpf_sk_splice6(struct __sk_buff *skb){
udp_state_key.__in46_u_dst.ip = tuple->ipv4.daddr;
udp_state_key.sport = tuple->ipv4.sport;
udp_state_key.dport = tuple->ipv4.dport;
if(local_diag->masquerade && local_ip4 && local_ip4->count){
__u32 l3_sum = bpf_csum_diff((__u32 *)&tuple->ipv4.saddr, sizeof(tuple->ipv4.saddr), (__u32 *)&local_ip4->ipaddr[0], sizeof(local_ip4->ipaddr[0]), 0);
struct masq_value mv = {0};
mv.__in46_u_origin.ip = tuple->ipv4.saddr;
struct masq_key mk = {0};
mk.dport = tuple->ipv4.dport;
mk.sport = tuple->ipv4.sport;
mk.ifindex = skb->ifindex;
mk.protocol = IPPROTO_UDP;
insert_masquerade(mv, mk);
iph->saddr = local_ip4->ipaddr[0];
/*Calculate l3 Checksum*/
bpf_l3_csum_replace(skb, sizeof(struct ethhdr) + offsetof(struct iphdr, check), 0, l3_sum, 0);
iph = (struct iphdr *)(skb->data + sizeof(*eth));
if ((unsigned long)(iph + 1) > (unsigned long)skb->data_end){
return TC_ACT_SHOT;
}
tuple = (struct bpf_sock_tuple *)(void*)(long)&iph->saddr;
if(!tuple){
return TC_ACT_SHOT;
}
tuple_len = sizeof(tuple->ipv4);
if ((unsigned long)tuple + tuple_len > (unsigned long)skb->data_end){
return TC_ACT_SHOT;
}
/*Calculate l4 Checksum*/
bpf_l4_csum_replace(skb, sizeof(struct ethhdr) + sizeof(struct iphdr) + offsetof(struct udphdr, check), mv.__in46_u_origin.ip, iph->saddr, BPF_F_PSEUDO_HDR | 4);
iph = (struct iphdr *)(skb->data + sizeof(*eth));
if ((unsigned long)(iph + 1) > (unsigned long)skb->data_end){
return TC_ACT_SHOT;
}
tuple = (struct bpf_sock_tuple *)(void*)(long)&iph->saddr;
if(!tuple){
return TC_ACT_SHOT;
}
tuple_len = sizeof(tuple->ipv4);
if ((unsigned long)tuple + tuple_len > (unsigned long)skb->data_end){
return TC_ACT_SHOT;
}
}
struct udp_state *ustate = get_udp(udp_state_key);
if((!ustate) || (ustate->tstamp > (tstamp + 30000000000))){
struct udp_state us = {
Expand Down Expand Up @@ -2277,13 +2363,13 @@ int bpf_sk_splice6(struct __sk_buff *skb){
mk.protocol = IPPROTO_TCP;
insert_masquerade(mv, mk);
memcpy(ip6h->saddr.in6_u.u6_addr32, local_ip6->ipaddr[0], sizeof(local_ip6->ipaddr[0]));
/*Calculate l4 Checksum*/
for(int x = 0; x < 4; x++){
bpf_l4_csum_replace(skb, sizeof(struct ethhdr) + sizeof(struct ipv6hdr) + offsetof(struct tcphdr, check), mv.__in46_u_origin.ip6[x], ip6h->saddr.in6_u.u6_addr32[x], BPF_F_PSEUDO_HDR | 4);
ip6h = (struct ipv6hdr *)(skb->data + sizeof(*eth));
if ((unsigned long)(ip6h + 1) > (unsigned long)skb->data_end){
return TC_ACT_SHOT;
}
protocol = ip6h->nexthdr;
tuple = (struct bpf_sock_tuple *)(void*)(long)&ip6h->saddr;
if(!tuple){
return TC_ACT_SHOT;
Expand All @@ -2292,9 +2378,6 @@ int bpf_sk_splice6(struct __sk_buff *skb){
if ((unsigned long)tuple + tuple_len > (unsigned long)skb->data_end){
return TC_ACT_SHOT;
}
if(protocol == IPPROTO_TCP){
tcp = true;
}
tcph = (struct tcphdr *)((unsigned long)ip6h + sizeof(*ip6h));
if ((unsigned long)(tcph + 1) > (unsigned long)skb->data_end){
return TC_ACT_SHOT;
Expand Down Expand Up @@ -2412,13 +2495,13 @@ int bpf_sk_splice6(struct __sk_buff *skb){
mk.protocol = IPPROTO_UDP;
insert_masquerade(mv, mk );
memcpy(ip6h->saddr.in6_u.u6_addr32, local_ip6->ipaddr[0], sizeof(local_ip6->ipaddr[0]));
/*Calculate l4 Checksum*/
for(int x = 0; x < 4; x++){
bpf_l4_csum_replace(skb, sizeof(struct ethhdr) + sizeof(struct ipv6hdr) + offsetof(struct udphdr, check), mv.__in46_u_origin.ip6[x], ip6h->saddr.in6_u.u6_addr32[x], BPF_F_PSEUDO_HDR | 4);
ip6h = (struct ipv6hdr *)(skb->data + sizeof(*eth));
if ((unsigned long)(ip6h + 1) > (unsigned long)skb->data_end){
return TC_ACT_SHOT;
}
protocol = ip6h->nexthdr;
tuple = (struct bpf_sock_tuple *)(void*)(long)&ip6h->saddr;
if(!tuple){
return TC_ACT_SHOT;
Expand All @@ -2427,9 +2510,6 @@ int bpf_sk_splice6(struct __sk_buff *skb){
if ((unsigned long)tuple + tuple_len > (unsigned long)skb->data_end){
return TC_ACT_SHOT;
}
if(protocol == IPPROTO_TCP){
tcp = true;
}
udph = (struct udphdr *)((unsigned long)ip6h + sizeof(*ip6h));
if ((unsigned long)(udph + 1) > (unsigned long)skb->data_end){
return TC_ACT_SHOT;
Expand Down

0 comments on commit 59f37cd

Please sign in to comment.