From 93a59b6e9ac515550dc6eeecf5b175f3819a4831 Mon Sep 17 00:00:00 2001 From: duxbbo Date: Mon, 25 Jul 2022 07:07:37 +0000 Subject: [PATCH] add fib Signed-off-by: duxbbo --- code/linux/net/newip/nip_fib.c | 294 +++++++++++++ code/linux/net/newip/nip_fib_rules.c | 31 ++ code/linux/net/newip/nndisc.c | 602 +++++++++++++++++++++++++++ code/linux/net/newip/protocol.c | 39 ++ 4 files changed, 966 insertions(+) create mode 100644 code/linux/net/newip/nip_fib.c create mode 100644 code/linux/net/newip/nip_fib_rules.c create mode 100644 code/linux/net/newip/nndisc.c create mode 100644 code/linux/net/newip/protocol.c diff --git a/code/linux/net/newip/nip_fib.c b/code/linux/net/newip/nip_fib.c new file mode 100644 index 0000000..14750ea --- /dev/null +++ b/code/linux/net/newip/nip_fib.c @@ -0,0 +1,294 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * + * Linux NewIP INET implementation + * Forwarding Information Database + * + * Based on net/ipv6/ip6_fib.c + */ +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +static struct kmem_cache *nip_fib_node_kmem __read_mostly; + +struct nip_fib_table *nip_fib_get_table(struct net *net, u32 id) +{ + if (id == NIP_RT_TABLE_MAIN) + return net->newip.nip_fib_main_tbl; + else if (id == NIP_RT_TABLE_LOCAL) + return net->newip.nip_fib_local_tbl; + else + return NULL; +} + +static struct nip_fib_node *nip_node_alloc(void) +{ + struct nip_fib_node *fn; + + fn = kmem_cache_zalloc(nip_fib_node_kmem, GFP_ATOMIC); + + return fn; +} + +void nip_rt_free_pcpu(struct nip_rt_info *non_pcpu_rt) +{ + int cpu; + + if (!non_pcpu_rt->rt_pcpu) + return; + + for_each_possible_cpu(cpu) { + struct nip_rt_info **ppcpu_rt; + struct nip_rt_info *pcpu_rt; + + ppcpu_rt = per_cpu_ptr(non_pcpu_rt->rt_pcpu, cpu); + pcpu_rt = *ppcpu_rt; + if (pcpu_rt) { + dst_dev_put(&pcpu_rt->dst); + dst_release(&pcpu_rt->dst); + *ppcpu_rt = NULL; + } + } + + free_percpu(non_pcpu_rt->rt_pcpu); + non_pcpu_rt->rt_pcpu = NULL; +} + +static u32 ninet_route_hash(const struct nip_addr *addr) +{ + return hash_32(nip_addr_hash(addr), NIN_ROUTE_HSIZE_SHIFT); +} + +struct nip_fib_node *nip_fib_locate(struct hlist_head *nip_tb_head, + const struct nip_addr *daddr) +{ + struct nip_fib_node *fib_node; + struct hlist_head *h; + unsigned int hash; + + hash = ninet_route_hash(daddr); + h = &nip_tb_head[hash]; + + hlist_for_each_entry_rcu(fib_node, h, fib_hlist) { + if (nip_addr_eq(&fib_node->nip_route_info->rt_dst, daddr)) + return fib_node; + } + + /* find default route */ + hash = ninet_route_hash(&nip_any_addr); + h = &nip_tb_head[hash]; + + hlist_for_each_entry_rcu(fib_node, h, fib_hlist) { + if (nip_addr_eq + (&fib_node->nip_route_info->rt_dst, &nip_any_addr)) { + return fib_node; + } + } + + return NULL; +} + +/* nip_tb_lock must be taken to avoid racing */ +int nip_fib_add(struct hlist_head *nip_tb_head, struct nip_rt_info *rt) +{ + struct nip_fib_node *fib_node, *new_node; + int err = 0; + struct hlist_head *h; + unsigned int hash; + + hash = ninet_route_hash(&rt->rt_dst); + h = &nip_tb_head[hash]; + + hlist_for_each_entry(fib_node, h, fib_hlist) { + if (nip_addr_eq(&fib_node->nip_route_info->rt_dst, + &rt->rt_dst)) { + err = -EEXIST; + goto fail; + } + } + + new_node = nip_node_alloc(); + if (!new_node) { + DEBUG("%s: fail to alloc mem.", __func__); + err = -ENOMEM; + goto fail; + } + new_node->nip_route_info = rt; + rcu_assign_pointer(rt->rt_node, new_node); + atomic_inc(&rt->rt_ref); + 
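Note on the lookup path: nip_fib_locate() above is a two-step hash probe. It hashes the exact destination with ninet_route_hash() and scans that bucket, and only if nothing matches does it fall back to the bucket of the all-zero address, which holds the default route. A minimal userspace sketch of that probe order, using a plain 32-bit value in place of struct nip_addr and a fixed bucket count (both stand-ins, not the patch's real types):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define HSIZE    16u          /* stand-in for NIN_ROUTE_HSIZE   */
#define ANY_ADDR 0u           /* stand-in for nip_any_addr      */

struct route {
	uint32_t dst;         /* simplified address key          */
	const char *gw;
	struct route *next;
};

static struct route *tbl[HSIZE];

static unsigned int rt_hash(uint32_t addr)
{
	/* multiplicative hash in place of hash_32(); 4-bit bucket index */
	return (addr * 2654435761u) >> 28;
}

static void rt_add(uint32_t dst, const char *gw)
{
	struct route *r = malloc(sizeof(*r));

	if (!r)
		exit(1);
	r->dst = dst;
	r->gw = gw;
	r->next = tbl[rt_hash(dst)];
	tbl[rt_hash(dst)] = r;
}

/* Exact-match bucket first, then the default-route bucket. */
static struct route *rt_locate(uint32_t dst)
{
	struct route *r;

	for (r = tbl[rt_hash(dst)]; r; r = r->next)
		if (r->dst == dst)
			return r;
	for (r = tbl[rt_hash(ANY_ADDR)]; r; r = r->next)
		if (r->dst == ANY_ADDR)
			return r;
	return NULL;
}

int main(void)
{
	rt_add(ANY_ADDR, "default-gw");
	rt_add(0x0a000001, "direct");

	printf("%s\n", rt_locate(0x0a000001)->gw);   /* direct     */
	printf("%s\n", rt_locate(0x0a0000ff)->gw);   /* default-gw */
	return 0;
}

Keeping the default route in an ordinary bucket keyed by the any-address means the fallback costs one extra bucket scan and needs no special casing in the insert path.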
hlist_add_tail_rcu(&new_node->fib_hlist, h); + +out: + return err; + +fail: + dst_release_immediate(&rt->dst); + goto out; +} + +static void nip_fib_destroy_rcu(struct rcu_head *head) +{ + struct nip_fib_node *fn = container_of(head, struct nip_fib_node, rcu); + + nip_rt_release(fn->nip_route_info); + kfree(fn); +} + +/* nip_tb_lock must be taken to avoid racing */ +int nip_fib_del(struct nip_rt_info *rt, struct nl_info *info) +{ + struct nip_fib_node *fn; + struct net *net = info->nl_net; + + fn = rcu_dereference_protected(rt->rt_node, + lockdep_is_held(&rt->rt_table->nip_tb_lock)); + if (!fn || rt == net->newip.nip_null_entry) + return -ENOENT; + + hlist_del_init_rcu(&fn->fib_hlist); + + /* 当fib_node释放后,fib_node指向的route_info才可释放 */ + RCU_INIT_POINTER(rt->rt_node, NULL); + call_rcu(&fn->rcu, nip_fib_destroy_rcu); + + return 0; +} + +static void nip_fib_free_table(struct nip_fib_table *table) +{ + kfree(table); +} + +/* caller must hold nip_tb_lock */ +static void nip_fib_clean_hash(struct net *net, struct hlist_head *nip_tb_head, + int (*func)(struct nip_rt_info *, void *arg), + void *arg) +{ + int i; + struct nip_fib_node *fn; + struct hlist_node *tmp; + struct nl_info info = { + .nl_net = net, + }; + + for (i = 0; i < NIN_ROUTE_HSIZE; i++) { + struct hlist_head *h = &nip_tb_head[i]; + + hlist_for_each_entry_safe(fn, tmp, h, fib_hlist) { + if (func(fn->nip_route_info, arg) < 0) { + DEBUG("%s: try to del nip_rt_info\n", __func__); + nip_fib_del(fn->nip_route_info, &info); + } + } + } +} + +void nip_fib_clean_all(struct net *net, + int (*func)(struct nip_rt_info *, void *arg), void *arg) +{ + struct nip_fib_table *main_tbl = net->newip.nip_fib_main_tbl; + struct nip_fib_table *local_tbl = net->newip.nip_fib_local_tbl; + + spin_lock_bh(&main_tbl->nip_tb_lock); + nip_fib_clean_hash(net, main_tbl->nip_tb_head, func, arg); + spin_unlock_bh(&main_tbl->nip_tb_lock); + + spin_lock_bh(&local_tbl->nip_tb_lock); + nip_fib_clean_hash(net, local_tbl->nip_tb_head, func, arg); + spin_unlock_bh(&local_tbl->nip_tb_lock); +} + +static void nip_fib_link_table(struct nip_fib_table *tb) +{ + /* You need to initialize multiple routing tables */ + spin_lock_init(&tb->nip_tb_lock); +} + +static void __net_init nip_fib_tables_init(struct net *net) +{ + nip_fib_link_table(net->newip.nip_fib_main_tbl); + nip_fib_link_table(net->newip.nip_fib_local_tbl); +} + +static int __net_init nip_fib_net_init(struct net *net) +{ + net->newip.nip_fib_main_tbl = + kzalloc(sizeof(*net->newip.nip_fib_main_tbl), GFP_KERNEL); + if (!net->newip.nip_fib_main_tbl) + goto out_fib_table_hash; + + net->newip.nip_fib_main_tbl->nip_tb_id = NIP_RT_TABLE_MAIN; + net->newip.nip_fib_main_tbl->flags = 1; + + net->newip.nip_fib_local_tbl = + kzalloc(sizeof(*net->newip.nip_fib_local_tbl), GFP_KERNEL); + if (!net->newip.nip_fib_local_tbl) + goto out_main_tbl; + + net->newip.nip_fib_local_tbl->nip_tb_id = NIP_RT_TABLE_LOCAL; + + nip_fib_tables_init(net); + + return 0; + +out_main_tbl: + kfree(net->newip.nip_fib_main_tbl); +out_fib_table_hash: + return -ENOMEM; +} + +static void nip_fib_net_exit(struct net *net) +{ + nip_fib_free_table(net->newip.nip_fib_main_tbl); + nip_fib_free_table(net->newip.nip_fib_local_tbl); +} + +static struct pernet_operations nip_fib_net_ops = { + .init = nip_fib_net_init, + .exit = nip_fib_net_exit, +}; + +int __init nip_fib_init(void) +{ + int ret = -ENOMEM; + + nip_fib_node_kmem = kmem_cache_create("nip_fib_nodes", + sizeof(struct nip_fib_node), + 0, SLAB_HWCACHE_ALIGN, NULL); + if (!nip_fib_node_kmem) + goto out; + + 
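Note on the deletion path: nip_fib_del() above unlinks the node with hlist_del_init_rcu() and hands the actual free to call_rcu(), so the nip_rt_info a node carries is only released once the fib_node itself has passed through a grace period and no RCU reader can still be walking it (this is what the in-line comment in nip_fib_del() states). A self-contained, out-of-tree style sketch of that unlink-then-call_rcu pattern, using illustrative structures rather than the patch's own:

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct demo_node {
	int key;
	struct hlist_node link;
	struct rcu_head rcu;
};

static HLIST_HEAD(demo_head);
static DEFINE_SPINLOCK(demo_lock);      /* writers only; readers use RCU */

static void demo_free_rcu(struct rcu_head *head)
{
	struct demo_node *n = container_of(head, struct demo_node, rcu);

	kfree(n);                       /* safe: grace period has elapsed */
}

static int demo_del(int key)
{
	struct demo_node *n;

	spin_lock(&demo_lock);
	hlist_for_each_entry(n, &demo_head, link) {
		if (n->key == key) {
			hlist_del_init_rcu(&n->link);
			spin_unlock(&demo_lock);
			call_rcu(&n->rcu, demo_free_rcu);
			return 0;
		}
	}
	spin_unlock(&demo_lock);
	return -ENOENT;
}

static int __init demo_init(void)
{
	struct demo_node *n = kzalloc(sizeof(*n), GFP_KERNEL);

	if (!n)
		return -ENOMEM;
	n->key = 1;
	spin_lock(&demo_lock);
	hlist_add_head_rcu(&n->link, &demo_head);
	spin_unlock(&demo_lock);
	return 0;
}

static void __exit demo_exit(void)
{
	demo_del(1);
	rcu_barrier();                  /* wait for the pending free before unload */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");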
DEBUG("nip_fib_node size is %lu\n", + sizeof(struct nip_fib_node) + sizeof(struct nip_rt_info)); + + ret = register_pernet_subsys(&nip_fib_net_ops); + if (ret) + goto out_kmem_cache_create; + +out: + return ret; + +out_kmem_cache_create: + kmem_cache_destroy(nip_fib_node_kmem); + goto out; +} + +void nip_fib_gc_cleanup(void) +{ + unregister_pernet_subsys(&nip_fib_net_ops); + kmem_cache_destroy(nip_fib_node_kmem); +} + diff --git a/code/linux/net/newip/nip_fib_rules.c b/code/linux/net/newip/nip_fib_rules.c new file mode 100644 index 0000000..1f3481b --- /dev/null +++ b/code/linux/net/newip/nip_fib_rules.c @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * + * NewIP Routing Policy Rules + * + * Based on net/ipv6/fib_rules.c + * Based on net/ipv6/fib6_rules.c + */ +#include +#include +#include +#include + +struct dst_entry *nip_fib_rule_lookup(struct net *net, struct flow_nip *fln, + int flags, nip_pol_lookup_t lookup) +{ + struct nip_rt_info *rt; + + rt = lookup(net, net->newip.nip_fib_local_tbl, fln, flags); + if (rt != net->newip.nip_null_entry) + return &rt->dst; + nip_rt_put(rt); + rt = lookup(net, net->newip.nip_fib_main_tbl, fln, flags); + if (rt != net->newip.nip_null_entry) + return &rt->dst; + nip_rt_put(rt); + + dst_hold(&net->newip.nip_null_entry->dst); + return &net->newip.nip_null_entry->dst; +} diff --git a/code/linux/net/newip/nndisc.c b/code/linux/net/newip/nndisc.c new file mode 100644 index 0000000..02ac3e6 --- /dev/null +++ b/code/linux/net/newip/nndisc.c @@ -0,0 +1,602 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * + * Neighbour Discovery for NewIP + * Linux NewIP INET implementation + * + * Based on net/ipv6/ndisc.c + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "nip_hdr.h" +#include "nip_checksum.h" + +/* NUD_INCOMPLETE + * The neighbor request packet has been sent but no response has been received + * NUD_REACHABLE + * Reachable: Indicates that the neighbor is reachable + * NUD_STAL + * Idle state, which has not been confirmed for a long time, + * and the idle time exceeds the rated time + * NUD_DELAY + * If the acknowledgment time expires but the idle time does not exceed the rated time, + * you need to obtain the acknowledgment packet + * NUD_PROBE + * After NUD_DELAY does not receive confirmation for a long time, ARP request messages are sent + * NUD_FAILED + * The neighbor is unreachable + * NUD_NOARP + * Indicates the status of the neighbor that does not need the ARP status change + * NUD_PERMANENT + * Indicates that the status of the neighbor item is permanent and does not need to change + * NUD_NONE + * Initialization status of the neighbor item + */ +static void nndisc_solicit(struct neighbour *neigh, struct sk_buff *skb); + +static u32 nndisc_hash(const void *pkey, + const struct net_device *dev, __u32 *fhash_rnd); +static bool nndisc_key_eq(const struct neighbour *neigh, const void *pkey); +static int nndisc_constructor(struct neighbour *neigh); + +static void nndisc_error_report(struct neighbour *neigh, struct sk_buff *skb) +{ + kfree_skb(skb); +} + +static const struct neigh_ops nndisc_generic_ops = { + .family = AF_NINET, + .solicit = nndisc_solicit, + .output = neigh_resolve_output, + .connected_output = 
neigh_connected_output, +}; + +static const struct neigh_ops nndisc_hh_ops = { + .family = AF_NINET, + .solicit = nndisc_solicit, + .error_report = nndisc_error_report, + .output = neigh_resolve_output, + .connected_output = neigh_resolve_output, +}; + +static const struct neigh_ops nndisc_direct_ops = { + .family = AF_NINET, + .output = neigh_direct_output, + .connected_output = neigh_direct_output, +}; + +#define NIP_NEIGH_MCAST_PROBES 4 +#define NIP_NEIGH_UCAST_PROBES 4 +#define NIP_NEIGH_DELAY_PROBE_TIME (5 * HZ) +#define NIP_NEIGH_GC_STALETIME (60 * HZ) +#define NIP_NEIGH_QUEUE_LEN_BYTES (64 * 1024) +#define NIP_NEIGH_PROXY_QLEN 64 +#define NIP_NEIGH_ANYCAST_DELAY (1 * HZ) +#define NIP_NEIGH_PROXY_DELAY ((8 * HZ) / 10) +#define NIP_NEIGH_GC_INTERVAL (30 * HZ) +#define NIP_NEIGH_GC_THRESH_1 128 +#define NIP_NEIGH_GC_THRESH_2 512 +#define NIP_NEIGH_GC_THRESH_3 1024 + +struct neigh_table nnd_tbl = { + .family = AF_NINET, + .key_len = sizeof(struct nip_addr), + .protocol = cpu_to_be16(ETH_P_NEWIP), + .hash = nndisc_hash, + .key_eq = nndisc_key_eq, + .constructor = nndisc_constructor, + .id = "nndisc_cache", + .parms = { + .tbl = &nnd_tbl, + .reachable_time = ND_REACHABLE_TIME, + .data = { + [NEIGH_VAR_MCAST_PROBES] = NIP_NEIGH_MCAST_PROBES, + [NEIGH_VAR_UCAST_PROBES] = NIP_NEIGH_UCAST_PROBES, + [NEIGH_VAR_RETRANS_TIME] = ND_RETRANS_TIMER, + [NEIGH_VAR_BASE_REACHABLE_TIME] = ND_REACHABLE_TIME, + [NEIGH_VAR_DELAY_PROBE_TIME] = NIP_NEIGH_DELAY_PROBE_TIME, + [NEIGH_VAR_GC_STALETIME] = NIP_NEIGH_GC_STALETIME, + [NEIGH_VAR_QUEUE_LEN_BYTES] = NIP_NEIGH_QUEUE_LEN_BYTES, + [NEIGH_VAR_PROXY_QLEN] = NIP_NEIGH_PROXY_QLEN, + [NEIGH_VAR_ANYCAST_DELAY] = NIP_NEIGH_ANYCAST_DELAY, + [NEIGH_VAR_PROXY_DELAY] = NIP_NEIGH_PROXY_DELAY, + }, + }, + .gc_interval = NIP_NEIGH_GC_INTERVAL, + .gc_thresh1 = NIP_NEIGH_GC_THRESH_1, + .gc_thresh2 = NIP_NEIGH_GC_THRESH_2, + .gc_thresh3 = NIP_NEIGH_GC_THRESH_3, +}; + +static u32 nndisc_hash(const void *pkey, + const struct net_device *dev, __u32 *hash_rnd) +{ + return nndisc_hashfn(pkey, dev, hash_rnd); +} + +static bool nndisc_key_eq(const struct neighbour *n, const void *pkey) +{ + return neigh_key_eq800(n, pkey); +} + +static int nndisc_constructor(struct neighbour *neigh) +{ + struct nip_addr *addr = (struct nip_addr *)&neigh->primary_key; + struct net_device *dev = neigh->dev; + struct ninet_dev *nin_dev; + struct neigh_parms *parms; + bool is_broadcast = (bool)nip_addr_eq(addr, &nip_broadcast_addr_arp); + + nin_dev = nin_dev_get(dev); + if (!nin_dev) + return -EINVAL; + + parms = nin_dev->nd_parms; + __neigh_parms_put(neigh->parms); + neigh->parms = neigh_parms_clone(parms); + neigh->type = RTN_UNICAST; + if (!dev->header_ops) { + neigh->nud_state = NUD_NOARP; + neigh->ops = &nndisc_direct_ops; + neigh->output = neigh_direct_output; + } else { + if (is_broadcast || + (dev->flags & IFF_POINTOPOINT)) { + neigh->nud_state = NUD_NOARP; + memcpy(neigh->ha, dev->broadcast, dev->addr_len); + } else if (dev->flags & (IFF_NOARP | IFF_LOOPBACK)) { + neigh->nud_state = NUD_NOARP; + memcpy(neigh->ha, dev->dev_addr, dev->addr_len); + if (dev->flags & IFF_LOOPBACK) + neigh->type = RTN_LOCAL; + } + + if (dev->header_ops->cache) + neigh->ops = &nndisc_hh_ops; + else + neigh->ops = &nndisc_generic_ops; + + if (neigh->nud_state & NUD_VALID) + neigh->output = neigh->ops->connected_output; + else + neigh->output = neigh->ops->output; + } + + nin_dev_put(nin_dev); + + return 0; +} + +void nip_insert_nndisc_send_checksum(struct sk_buff *skb, u_short checksum) +{ +#define 
NNDISC_CHECKSUM_BIAS 2 + *(__u16 *)(skb_transport_header(skb) + NNDISC_CHECKSUM_BIAS) = + htons(checksum); +} + +unsigned short nip_get_nndisc_send_checksum(struct sk_buff *skb, + struct nip_hdr_encap *head, + int payload_len) +{ + struct nip_pseudo_header nph = {0}; + + nph.nexthdr = head->nexthdr; + nph.saddr = head->saddr; + nph.daddr = head->daddr; + nph.check_len = htons(payload_len); + return nip_check_sum_build(skb_transport_header(skb), + payload_len, &nph); +} + +bool nip_get_nndisc_rcv_checksum(struct sk_buff *skb, + u_char *transport_tail) +{ + struct nip_pseudo_header nph = {0}; + unsigned short check_len = (unsigned short)(transport_tail - (skb_transport_header(skb))); + + nph.nexthdr = NIPCB(skb)->nexthdr; + nph.saddr = NIPCB(skb)->srcaddr; + nph.daddr = NIPCB(skb)->dstaddr; + nph.check_len = htons(check_len); + + return nip_check_sum_parse(skb_transport_header(skb), check_len, &nph) + == 0xffff ? true : false; +} + +static void nndisc_payload_ns_pack(const struct nip_addr *solicit, + struct sk_buff *skb) +{ + struct nnd_msg *msg = (struct nnd_msg *)skb->data; + u_char *p = msg->data; + + memset(&msg->icmph, 0, sizeof(msg->icmph)); + msg->icmph.nip_icmp_type = NIP_ARP_NS; + msg->icmph.nip_icmp_cksum = 0; + p = build_nip_addr(solicit, p); +} + +static struct dst_entry *nndisc_dst_alloc(struct net_device *dev) +{ + struct nip_rt_info *rt; + struct net *net = dev_net(dev); + + rt = nip_dst_alloc(net, dev, 0); + if (!rt) + return NULL; + + rt->dst.flags |= DST_HOST; + rt->dst.input = nip_input; + rt->dst.output = nip_output; + atomic_set(&rt->dst.__refcnt, 1); + + return &rt->dst; +} + +static int get_ns_payload_len(const struct nip_addr *solicit) +{ + return sizeof(struct nip_icmp_hdr) + get_nip_addr_len(solicit); +} + +static void nndisc_send_ns(struct net_device *dev, + const struct nip_addr *solicit, + const struct nip_addr *daddr, + const struct nip_addr *saddr) +{ + int ret; + struct sk_buff *skb; + struct dst_entry *dst; + struct net *net; + struct sock *sk = NULL; + int payload_len = get_ns_payload_len(solicit); + int len = NIP_ETH_HDR_LEN + NIP_HDR_MAX + payload_len; + struct nip_hdr_encap head = {0}; + unsigned short checksum; + + head.saddr = *saddr; + head.daddr = *daddr; + head.ttl = NIP_ARP_DEFAULT_TTL; + head.nexthdr = IPPROTO_NIP_ICMP; + + skb = alloc_skb(len, 0); + if (!skb) { + DEBUG("%s: no space for skbuff!", __func__); + return; + } + + skb->protocol = htons(ETH_P_NEWIP); + skb->dev = dev; + skb->ip_summed = CHECKSUM_NONE; + skb->csum = 0; + memset(NIPCB(skb), 0, sizeof(struct ninet_skb_parm)); + + NIPCB(skb)->dstaddr = head.daddr; + NIPCB(skb)->srcaddr = head.saddr; + NIPCB(skb)->nexthdr = head.nexthdr; + + /* reserve space for hardware header */ + skb_reserve(skb, NIP_ETH_HDR_LEN); + skb_reset_network_header(skb); + + /* build nwk header */ + head.hdr_buf = (unsigned char *)skb->data; + nip_hdr_comm_encap(&head); + head.total_len = head.hdr_buf_pos + payload_len; + nip_update_total_len(&head, htons(head.total_len)); + skb_reserve(skb, head.hdr_buf_pos); + skb_reset_transport_header(skb); + + /* build transport header */ + nndisc_payload_ns_pack(solicit, skb); + skb_reserve(skb, payload_len); + + skb->data = skb_network_header(skb); + skb->len = head.hdr_buf_pos + payload_len; + + dst = nndisc_dst_alloc(dev); + if (!dst) { + kfree_skb(skb); + return; + } + + /* add check sum*/ + checksum = nip_get_nndisc_send_checksum(skb, &head, payload_len); + nip_insert_nndisc_send_checksum(skb, checksum); + + skb_dst_set(skb, dst); + net = dev_net(skb->dev); + + /* DST 
is set to SKB, and output is used to release SKB regardless of success or failure */ + ret = dst_output(net, sk, skb); + if (ret) + DEBUG("%s: dst output fail.", __func__); +} + +static void nndisc_solicit(struct neighbour *neigh, struct sk_buff *skb) +{ + struct net_device *dev = neigh->dev; + struct nip_addr *target = (struct nip_addr *)&neigh->primary_key; + struct nip_addr *saddr = NULL; + struct ninet_dev *idev; + + /* Obtain the NewIP address from the current dev as + * the source address of the request packet + */ + rcu_read_lock(); + idev = __nin_dev_get(dev); + if (idev) { + read_lock_bh(&idev->lock); + if (!list_empty(&idev->addr_list)) { + struct ninet_ifaddr *ifp; + + list_for_each_entry(ifp, &idev->addr_list, if_list) { + saddr = &ifp->addr; + nndisc_send_ns(dev, target, + &nip_broadcast_addr_arp, + saddr); + } + } + read_unlock_bh(&idev->lock); + } else { + DEBUG("%s:idev don't exist.", __func__); + } + rcu_read_unlock(); +} + +static void build_na_hdr(u_char *smac, u_char mac_len, struct sk_buff *skb) +{ + struct nnd_msg *msg = (struct nnd_msg *)skb->data; + u_char *p = msg->data; + + memset(&msg->icmph, 0, sizeof(msg->icmph)); + msg->icmph.nip_icmp_type = NIP_ARP_NA; + msg->icmph.nip_icmp_cksum = 0; + *p = mac_len; + p++; + memcpy(p, smac, mac_len); +} + +static int get_na_payload_len(struct net_device *dev) +{ + /* Icmp Header Length + * Number of bytes in the MAC address length field + * MAC Address Length + */ + return sizeof(struct nip_icmp_hdr) + 1 + dev->addr_len; +} + +static void nndisc_send_na(struct net_device *dev, + const struct nip_addr *daddr, + const struct nip_addr *saddr) +{ + int ret; + struct sk_buff *skb = NULL; + struct dst_entry *dst = NULL; + struct sock *sk = NULL; + int csummode = CHECKSUM_NONE; + int payload_len = get_na_payload_len(dev); + int len = NIP_ETH_HDR_LEN + NIP_HDR_MAX + payload_len; + u_char *smac = dev->dev_addr; + struct nip_hdr_encap head = {0}; + u_short checksum = 0; + + head.saddr = *saddr; + head.daddr = *daddr; + head.ttl = NIP_ARP_DEFAULT_TTL; + head.nexthdr = IPPROTO_NIP_ICMP; + + skb = alloc_skb(len, 0); + if (!skb) { + DEBUG("%s: no space for skbuff!", __func__); + return; + } + skb->protocol = htons(ETH_P_NEWIP); + skb->ip_summed = csummode; + skb->csum = 0; + skb->dev = dev; + memset(NIPCB(skb), 0, sizeof(struct ninet_skb_parm)); + + NIPCB(skb)->dstaddr = head.daddr; + NIPCB(skb)->srcaddr = head.saddr; + NIPCB(skb)->nexthdr = head.nexthdr; + + /* reserve space for hardware header */ + skb_reserve(skb, NIP_ETH_HDR_LEN); + skb_reset_network_header(skb); + + /* build nwk header */ + head.hdr_buf = (unsigned char *)skb->data; + nip_hdr_comm_encap(&head); + head.total_len = head.hdr_buf_pos + payload_len; + nip_update_total_len(&head, htons(head.total_len)); + skb_reserve(skb, head.hdr_buf_pos); + skb_reset_transport_header(skb); + + /* build na header */ + build_na_hdr(smac, dev->addr_len, skb); + + /* skip transport hdr */ + skb_reserve(skb, payload_len); + + /* set skb->data to point network header */ + skb->data = skb_network_header(skb); + skb->len = head.hdr_buf_pos + payload_len; + + dst = nndisc_dst_alloc(dev); + if (!dst) { + kfree_skb(skb); + return; + } + + /* add check sum*/ + checksum = nip_get_nndisc_send_checksum(skb, &head, payload_len); + nip_insert_nndisc_send_checksum(skb, checksum); + + skb_dst_set(skb, dst); + ret = dst_output(dev_net(skb->dev), sk, skb); + if (ret) + DEBUG("%s: dst output fail.", __func__); +} + +bool nip_addr_local(struct net_device *dev, struct nip_addr *addr) +{ + struct ninet_dev 
*idev; + bool ret = false; + + rcu_read_lock(); + idev = __nin_dev_get(dev); + if (idev) { + read_lock_bh(&idev->lock); + if (!list_empty(&idev->addr_list)) { + struct ninet_ifaddr *ifp; + + list_for_each_entry(ifp, &idev->addr_list, if_list) { + if (nip_addr_eq(addr, &ifp->addr)) { + ret = true; + break; + } + } + } + read_unlock_bh(&idev->lock); + } + rcu_read_unlock(); + + return ret; +} + +int nndisc_rcv_ns(struct sk_buff *skb) +{ + struct nnd_msg *msg = (struct nnd_msg *)skb_transport_header(skb); + u_char *p = msg->data; + u_char *lladdr; + struct nip_addr addr = {0}; + struct neighbour *neigh; + struct ethhdr *eth; + struct net_device *dev = skb->dev; + int err = 0; + + p = decode_nip_addr(p, &addr); + if (!p) { + DEBUG("failure when decode source address!"); + err = -EFAULT; + goto out; + } + + if (nip_addr_invalid(&addr)) { + DEBUG("%s: icmp hdr addr invalid.", __func__); + err = -EFAULT; + goto out; + } + + if (!nip_addr_local(dev, &addr)) { + err = -ENXIO; + goto out; + } + + eth = (struct ethhdr *)skb_mac_header(skb); + lladdr = eth->h_source; + + /* checksum parse*/ + if (!nip_get_nndisc_rcv_checksum(skb, p)) { + DEBUG("%s:ns ICMP checksum failed, drop the packet", __func__); + err = -EINVAL; + goto out; + } + + neigh = __neigh_lookup(&nnd_tbl, &NIPCB(skb)->srcaddr, dev, lladdr || + !dev->addr_len); + if (neigh) { + neigh_update(neigh, lladdr, NUD_STALE, NEIGH_UPDATE_F_OVERRIDE, + 0); + neigh_release(neigh); + } + + nndisc_send_na(dev, &NIPCB(skb)->srcaddr, &addr); +out: + kfree_skb(skb); + return err; +} + +int nndisc_rcv_na(struct sk_buff *skb) +{ + struct nnd_msg *msg = (struct nnd_msg *)skb_transport_header(skb); + u_char *p = msg->data; + u_char len; + u8 lladdr[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))]; + struct net_device *dev = skb->dev; + struct neighbour *neigh; + + len = *p; + p++; + memset(lladdr, 0, ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))); + memcpy(lladdr, p, len); + + if (!nip_get_nndisc_rcv_checksum(skb, p + len)) { + DEBUG("%s:na ICMP checksum failed! drop the packet!" + , __func__); + kfree_skb(skb); + return 0; + } + + neigh = neigh_lookup(&nnd_tbl, &NIPCB(skb)->srcaddr, dev); + if (neigh) { + neigh_update(neigh, lladdr, NUD_REACHABLE, + NEIGH_UPDATE_F_OVERRIDE, 0); + neigh_release(neigh); + kfree_skb(skb); + return 0; + } + kfree_skb(skb); + return -EFAULT; +} + +int nndisc_rcv(struct sk_buff *skb) +{ + int ret = 0; + struct nip_icmp_hdr *hdr = nip_icmp_header(skb); + u8 type = hdr->nip_icmp_type; + + switch (type) { + case NIP_ARP_NS: + ret = nndisc_rcv_ns(skb); + break; + case NIP_ARP_NA: + ret = nndisc_rcv_na(skb); + break; + default: + DEBUG("arp packet type error"); + } + + return ret; +} + +int __init nndisc_init(void) +{ + neigh_table_init(NEIGH_NND_TABLE, &nnd_tbl); + return 0; +} diff --git a/code/linux/net/newip/protocol.c b/code/linux/net/newip/protocol.c new file mode 100644 index 0000000..1f35d9e --- /dev/null +++ b/code/linux/net/newip/protocol.c @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * + * NewIP INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. NewIP INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * NewIP INET protocol dispatch tables. 
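Note on the checksum handling above: both nndisc send paths compute the ICMP checksum over a pseudo-header (next header, source, destination, payload length) followed by the transport payload, and nndisc_rcv_ns()/nndisc_rcv_na() only accept a packet when the folded ones'-complement sum of the same data, with the received checksum in place, comes out as 0xffff. A small userspace sketch of that build/verify pair, with fixed 4-byte addresses standing in for the variable-length struct nip_addr and illustrative field values (the real helpers are nip_check_sum_build()/nip_check_sum_parse()):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified pseudo-header; the real one carries variable-length NewIP addresses. */
struct pseudo_hdr {
	uint8_t  nexthdr;
	uint8_t  pad;
	uint32_t saddr;
	uint32_t daddr;
	uint16_t check_len;    /* payload length; network byte order in the real code */
};

/* Ones'-complement sum over two byte ranges, folded to 16 bits. */
static uint16_t csum(const uint8_t *a, size_t alen, const uint8_t *b, size_t blen)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < alen; i++)
		sum += (i & 1) ? a[i] : (uint32_t)a[i] << 8;
	for (i = 0; i < blen; i++)
		sum += (i & 1) ? b[i] : (uint32_t)b[i] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	struct pseudo_hdr ph;
	uint8_t payload[8] = { 0x01, 0x00, 0x00, 0x00, 0xaa, 0xbb, 0xcc, 0xdd };
	uint16_t sum, check;

	memset(&ph, 0, sizeof(ph));
	ph.nexthdr = 0x4c;      /* illustrative value */
	ph.saddr = 1;
	ph.daddr = 2;
	ph.check_len = sizeof(payload);

	/* Sender: sum with the checksum field still zero, then store its complement
	 * at offset 2 of the ICMP header, as NNDISC_CHECKSUM_BIAS does above.
	 */
	sum = csum((const uint8_t *)&ph, sizeof(ph), payload, sizeof(payload));
	check = (uint16_t)~sum;
	payload[2] = check >> 8;
	payload[3] = check & 0xff;

	/* Receiver: summing the same data with the checksum in place must give 0xffff. */
	sum = csum((const uint8_t *)&ph, sizeof(ph), payload, sizeof(payload));
	printf("verify: 0x%04x (%s)\n", sum, sum == 0xffff ? "ok" : "bad");
	return 0;
}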
+ * + * Based on net/ipv6/protocol.c + */ +#include +#include +#include +#include + +const struct ninet_protocol __rcu *ninet_protos[MAX_INET_PROTOS] __read_mostly; + +int ninet_add_protocol(const struct ninet_protocol *prot, + unsigned char protocol) +{ + return !cmpxchg((const struct ninet_protocol **)&ninet_protos[protocol], + NULL, prot) ? 0 : -1; +} + +int ninet_del_protocol(const struct ninet_protocol *prot, + unsigned char protocol) +{ + int ret; + + ret = (cmpxchg((const struct ninet_protocol **)&ninet_protos[protocol], + prot, NULL) == prot) ? 0 : -1; + + synchronize_net(); + + return ret; +} + -- Gitee
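Note on the dispatch table above: ninet_add_protocol()/ninet_del_protocol() register a receive handler with a single compare-and-swap on one slot of ninet_protos[]. Add succeeds only if the slot was NULL, delete only if the slot still holds that handler, and synchronize_net() then lets in-flight readers drain before the caller may free anything the handler uses. The same slot-registration idiom sketched in userspace with C11 atomics (the grace-period step is only noted in a comment, since plain atomics have no RCU equivalent; the protocol number is illustrative):

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_PROTOS 256

typedef void (*proto_handler)(const char *pkt);

static _Atomic(proto_handler) protos[MAX_PROTOS];

/* Succeeds only if the slot is currently empty, mirroring !cmpxchg(..., NULL, prot). */
static int add_protocol(proto_handler h, unsigned char protocol)
{
	proto_handler expected = NULL;

	return atomic_compare_exchange_strong(&protos[protocol], &expected, h) ? 0 : -1;
}

/* Succeeds only if the slot still holds this handler; the kernel code then runs
 * synchronize_net() so concurrent readers finish before the handler goes away.
 */
static int del_protocol(proto_handler h, unsigned char protocol)
{
	proto_handler expected = h;

	return atomic_compare_exchange_strong(&protos[protocol], &expected, NULL) ? 0 : -1;
}

static void icmp_handler(const char *pkt)
{
	printf("icmp: %s\n", pkt);
}

int main(void)
{
	unsigned char proto = 0x4c;   /* illustrative protocol number */
	proto_handler h;

	printf("add: %d\n", add_protocol(icmp_handler, proto));       /* 0  */
	printf("add again: %d\n", add_protocol(icmp_handler, proto)); /* -1 */

	h = atomic_load(&protos[proto]);
	if (h)
		h("ns packet");

	printf("del: %d\n", del_protocol(icmp_handler, proto));       /* 0  */
	return 0;
}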