diff --git a/tests/build_netmap.bash b/tests/build_netmap.bash index 66524a2..1782284 100755 --- a/tests/build_netmap.bash +++ b/tests/build_netmap.bash @@ -1,3 +1,5 @@ #!/bin/bash -g++ netmap.cpp -I/usr/src/netmap/sys/ -onetmap +clang++ fastnetmon_packet_parser.cpp -c -ofastnetmon_packet_parser.o +clang++ netmap.cpp -I/usr/local/include -L/usr/local/lib -I/usr/src/fastnetmon/tests/netmap_includes -lboost_thread -lboost_system fastnetmon_packet_parser.o + diff --git a/tests/fastnetmon_packet_parser.cpp b/tests/fastnetmon_packet_parser.cpp new file mode 100644 index 0000000..f16d368 --- /dev/null +++ b/tests/fastnetmon_packet_parser.cpp @@ -0,0 +1,819 @@ +#include "fastnetmon_packet_parser.h" + +/* This code is copy & paste from PF_RING user space library licensed under LGPL terms */ + +#include // For support uint32_t, uint16_t +#include // gettimeofday +#include +#include +#include // in6_addr +#include +#include // memcpy +#include +#include // inet_ntop + +#if defined(__FreeBSD__) || defined(__APPLE__) +#include // AF_INET6 +#endif + +// Fake fields +#define ipv4_tos ip_tos +#define ipv6_tos ip_tos +#define ipv4_src ip_src.v4 +#define ipv4_dst ip_dst.v4 +#define ipv6_src ip_src.v6 +#define ipv6_dst ip_dst.v6 +#define host4_low host_low.v4 +#define host4_high host_high.v4 +#define host6_low host_low.v6 +#define host6_high host_high.v6 +#define host4_peer_a host_peer_a.v4 +#define host4_peer_b host_peer_b.v4 +#define host6_peer_a host_peer_a.v6 +#define host6_peer_b host_peer_b.v6 + +// GRE tunnels +#define GRE_HEADER_CHECKSUM 0x8000 +#define GRE_HEADER_ROUTING 0x4000 +#define GRE_HEADER_KEY 0x2000 +#define GRE_HEADER_SEQ_NUM 0x1000 +#define GRE_HEADER_VERSION 0x0007 + +struct gre_header { + u_int16_t flags_and_version; + u_int16_t proto; + /* Optional fields */ +}; + + +// GTP tunnels +#define GTP_SIGNALING_PORT 2123 +#define GTP_U_DATA_PORT 2152 + +#define GTP_VERSION_1 0x1 +#define GTP_VERSION_2 0x2 +#define GTP_PROTOCOL_TYPE 0x1 + +#define GTP_VERSION_1 0x1 
+#define GTP_VERSION_2 0x2 +#define GTP_PROTOCOL_TYPE 0x1 + +struct gtp_v1_hdr { +#define GTP_FLAGS_VERSION 0xE0 +#define GTP_FLAGS_VERSION_SHIFT 5 +#define GTP_FLAGS_PROTOCOL_TYPE 0x10 +#define GTP_FLAGS_RESERVED 0x08 +#define GTP_FLAGS_EXTENSION 0x04 +#define GTP_FLAGS_SEQ_NUM 0x02 +#define GTP_FLAGS_NPDU_NUM 0x01 + u_int8_t flags; + u_int8_t message_type; + u_int16_t payload_len; + u_int32_t teid; +} __attribute__((__packed__)); + +/* Optional: GTP_FLAGS_EXTENSION | GTP_FLAGS_SEQ_NUM | GTP_FLAGS_NPDU_NUM */ +struct gtp_v1_opt_hdr { + u_int16_t seq_num; + u_int8_t npdu_num; + u_int8_t next_ext_hdr; +} __attribute__((__packed__)); + +/* Optional: GTP_FLAGS_EXTENSION && next_ext_hdr != 0 */ +struct gtp_v1_ext_hdr { +#define GTP_EXT_HDR_LEN_UNIT_BYTES 4 + u_int8_t len; /* 4-byte unit */ + /* + * u_char contents[len*4-2]; + * u_int8_t next_ext_hdr; + */ +} __attribute__((__packed__)); + +#define NO_TUNNEL_ID 0xFFFFFFFF + +#define NEXTHDR_HOP 0 +#define NEXTHDR_TCP 6 +#define NEXTHDR_UDP 17 +#define NEXTHDR_IPV6 41 +#define NEXTHDR_ROUTING 43 +#define NEXTHDR_FRAGMENT 44 +#define NEXTHDR_ESP 50 +#define NEXTHDR_AUTH 51 +#define NEXTHDR_ICMP 58 +#define NEXTHDR_NONE 59 +#define NEXTHDR_DEST 60 +#define NEXTHDR_MOBILITY 135 + +// TCP flags +#define TH_FIN_MULTIPLIER 0x01 +#define TH_SYN_MULTIPLIER 0x02 +#define TH_RST_MULTIPLIER 0x04 +#define TH_PUSH_MULTIPLIER 0x08 +#define TH_ACK_MULTIPLIER 0x10 +#define TH_URG_MULTIPLIER 0x20 + +#define __LITTLE_ENDIAN_BITFIELD /* FIX */ + +struct tcphdr { + u_int16_t source; + u_int16_t dest; + u_int32_t seq; + u_int32_t ack_seq; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u_int16_t res1:4, + doff:4, + fin:1, + syn:1, + rst:1, + psh:1, + ack:1, + urg:1, + ece:1, + cwr:1; +#elif defined(__BIG_ENDIAN_BITFIELD) + u_int16_t doff:4, + res1:4, + cwr:1, + ece:1, + urg:1, + ack:1, + psh:1, + rst:1, + syn:1, + fin:1; +#else +#error "Adjust your defines" +#endif + u_int16_t window; + u_int16_t check; + u_int16_t urg_ptr; +}; + +struct udphdr { + 
u_int16_t source; + u_int16_t dest; + u_int16_t len; + u_int16_t check; +}; + +struct eth_vlan_hdr { + u_int16_t h_vlan_id; /* Tag Control Information (QoS, VLAN ID) */ + u_int16_t h_proto; /* packet type ID field */ +}; + +struct kcompact_ipv6_hdr { + u_int8_t priority:4, + version:4; + u_int8_t flow_lbl[3]; + u_int16_t payload_len; + u_int8_t nexthdr; + u_int8_t hop_limit; + struct in6_addr saddr; + struct in6_addr daddr; +}; + +struct kcompact_ipv6_opt_hdr { + u_int8_t nexthdr; + u_int8_t hdrlen; + u_int8_t padding[6]; +} __attribute__((packed)); + +#define __LITTLE_ENDIAN_BITFIELD /* FIX */ +struct iphdr { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u_int8_t ihl:4, + version:4; +#elif defined (__BIG_ENDIAN_BITFIELD) + u_int8_t version:4, + ihl:4; +#else +#error "Please fix " +#endif + u_int8_t tos; + u_int16_t tot_len; + u_int16_t id; +#define IP_CE 0x8000 +#define IP_DF 0x4000 +#define IP_MF 0x2000 +#define IP_OFFSET 0x1FFF + u_int16_t frag_off; + u_int8_t ttl; + u_int8_t protocol; + u_int16_t check; + u_int32_t saddr; + u_int32_t daddr; + /*The options start here. */ +}; + +// Prototypes +char *etheraddr2string(const u_char *ep, char *buf); +char *intoa(unsigned int addr); +char *_intoa(unsigned int addr, char* buf, u_short bufLen); +static char *in6toa(struct in6_addr addr6); +char *proto2str(u_short proto); + +#if defined(__FreeBSD__) || defined(__APPLE__) +/* This code from /usr/includes/linux/if_ether.h Linus file */ +#define ETH_ALEN 6 /* Octets in one ethernet addr */ +#define ETH_P_IP 0x0800 /* Internet Protocol packet */ +#define ETH_P_IPV6 0x86DD /* IPv6 over bluebook */ + + +/* + * This is an Ethernet frame header. 
+ */ + +struct ethhdr { + unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ + unsigned char h_source[ETH_ALEN]; /* source ether addr */ + u_int16_t h_proto; /* packet type ID field */ +} __attribute__((packed)); + +#endif + +#if defined(__FreeBSD__) || defined(__APPLE__) +u_int32_t pfring_hash_pkt(struct pfring_pkthdr *hdr) { + if (hdr->extended_hdr.parsed_pkt.tunnel.tunnel_id == NO_TUNNEL_ID) { + return + hdr->extended_hdr.parsed_pkt.vlan_id + + hdr->extended_hdr.parsed_pkt.l3_proto + + hdr->extended_hdr.parsed_pkt.ip_src.v6.__u6_addr.__u6_addr32[0] + + hdr->extended_hdr.parsed_pkt.ip_src.v6.__u6_addr.__u6_addr32[1] + + hdr->extended_hdr.parsed_pkt.ip_src.v6.__u6_addr.__u6_addr32[2] + + hdr->extended_hdr.parsed_pkt.ip_src.v6.__u6_addr.__u6_addr32[3] + + hdr->extended_hdr.parsed_pkt.ip_dst.v6.__u6_addr.__u6_addr32[0] + + hdr->extended_hdr.parsed_pkt.ip_dst.v6.__u6_addr.__u6_addr32[1] + + hdr->extended_hdr.parsed_pkt.ip_dst.v6.__u6_addr.__u6_addr32[2] + + hdr->extended_hdr.parsed_pkt.ip_dst.v6.__u6_addr.__u6_addr32[3] + + hdr->extended_hdr.parsed_pkt.l4_src_port + + hdr->extended_hdr.parsed_pkt.l4_dst_port; + } else { + return + hdr->extended_hdr.parsed_pkt.vlan_id + + hdr->extended_hdr.parsed_pkt.tunnel.tunneled_proto + + hdr->extended_hdr.parsed_pkt.tunnel.tunneled_ip_src.v6.__u6_addr.__u6_addr32[1] + + hdr->extended_hdr.parsed_pkt.tunnel.tunneled_ip_src.v6.__u6_addr.__u6_addr32[2] + + hdr->extended_hdr.parsed_pkt.tunnel.tunneled_ip_src.v6.__u6_addr.__u6_addr32[3] + + hdr->extended_hdr.parsed_pkt.tunnel.tunneled_ip_dst.v6.__u6_addr.__u6_addr32[0] + + hdr->extended_hdr.parsed_pkt.tunnel.tunneled_ip_dst.v6.__u6_addr.__u6_addr32[1] + + hdr->extended_hdr.parsed_pkt.tunnel.tunneled_ip_dst.v6.__u6_addr.__u6_addr32[2] + + hdr->extended_hdr.parsed_pkt.tunnel.tunneled_ip_dst.v6.__u6_addr.__u6_addr32[3] + + hdr->extended_hdr.parsed_pkt.tunnel.tunneled_l4_src_port + + hdr->extended_hdr.parsed_pkt.tunnel.tunneled_l4_dst_port; + } +} +#else +u_int32_t 
pfring_hash_pkt(struct pfring_pkthdr *hdr) { + if (hdr->extended_hdr.parsed_pkt.tunnel.tunnel_id == NO_TUNNEL_ID) { + return + hdr->extended_hdr.parsed_pkt.vlan_id + + hdr->extended_hdr.parsed_pkt.l3_proto + + hdr->extended_hdr.parsed_pkt.ip_src.v6.s6_addr32[0] + + hdr->extended_hdr.parsed_pkt.ip_src.v6.s6_addr32[1] + + hdr->extended_hdr.parsed_pkt.ip_src.v6.s6_addr32[2] + + hdr->extended_hdr.parsed_pkt.ip_src.v6.s6_addr32[3] + + hdr->extended_hdr.parsed_pkt.ip_dst.v6.s6_addr32[0] + + hdr->extended_hdr.parsed_pkt.ip_dst.v6.s6_addr32[1] + + hdr->extended_hdr.parsed_pkt.ip_dst.v6.s6_addr32[2] + + hdr->extended_hdr.parsed_pkt.ip_dst.v6.s6_addr32[3] + + hdr->extended_hdr.parsed_pkt.l4_src_port + + hdr->extended_hdr.parsed_pkt.l4_dst_port; + } else { + return + hdr->extended_hdr.parsed_pkt.vlan_id + + hdr->extended_hdr.parsed_pkt.tunnel.tunneled_proto + + hdr->extended_hdr.parsed_pkt.tunnel.tunneled_ip_src.v6.s6_addr32[1] + + hdr->extended_hdr.parsed_pkt.tunnel.tunneled_ip_src.v6.s6_addr32[2] + + hdr->extended_hdr.parsed_pkt.tunnel.tunneled_ip_src.v6.s6_addr32[3] + + hdr->extended_hdr.parsed_pkt.tunnel.tunneled_ip_dst.v6.s6_addr32[0] + + hdr->extended_hdr.parsed_pkt.tunnel.tunneled_ip_dst.v6.s6_addr32[1] + + hdr->extended_hdr.parsed_pkt.tunnel.tunneled_ip_dst.v6.s6_addr32[2] + + hdr->extended_hdr.parsed_pkt.tunnel.tunneled_ip_dst.v6.s6_addr32[3] + + hdr->extended_hdr.parsed_pkt.tunnel.tunneled_l4_src_port + + hdr->extended_hdr.parsed_pkt.tunnel.tunneled_l4_dst_port; + } +} +#endif +static int __pfring_parse_tunneled_pkt(u_char *pkt, struct pfring_pkthdr *hdr, u_int16_t ip_version, u_int16_t tunnel_offset) { + u_int32_t ip_len = 0; + u_int16_t fragment_offset = 0; + + if(ip_version == 4 /* IPv4 */ ) { + struct iphdr *tunneled_ip; + + if(hdr->caplen < (tunnel_offset+sizeof(struct iphdr))) + return 0; + + tunneled_ip = (struct iphdr *) (&pkt[tunnel_offset]); + + hdr->extended_hdr.parsed_pkt.tunnel.tunneled_proto = tunneled_ip->protocol; + 
    /* Tunneled IPv4: inner addresses stored in host byte order. */
    hdr->extended_hdr.parsed_pkt.tunnel.tunneled_ip_src.v4 = ntohl(tunneled_ip->saddr);
    hdr->extended_hdr.parsed_pkt.tunnel.tunneled_ip_dst.v4 = ntohl(tunneled_ip->daddr);

    /* Non-zero when this is a non-first IP fragment; the mask is applied
       in network byte order on purpose (only the zero/non-zero result is
       used, never the numeric offset). */
    fragment_offset = tunneled_ip->frag_off & htons(IP_OFFSET); /* fragment, but not the first */
    ip_len = tunneled_ip->ihl*4; /* IHL is in 32-bit words */
    tunnel_offset += ip_len;

  } else if(ip_version == 6 /* IPv6 */ ) {
    struct kcompact_ipv6_hdr *tunneled_ipv6;

    if(hdr->caplen < (tunnel_offset+sizeof(struct kcompact_ipv6_hdr)))
      return 0; /* truncated capture: inner header not parsed at all */

    tunneled_ipv6 = (struct kcompact_ipv6_hdr *) (&pkt[tunnel_offset]);

    hdr->extended_hdr.parsed_pkt.tunnel.tunneled_proto = tunneled_ipv6->nexthdr;

    /* Values of IPv6 addresses are stored as network byte order */
    memcpy(&hdr->extended_hdr.parsed_pkt.tunnel.tunneled_ip_src.v6, &tunneled_ipv6->saddr, sizeof(tunneled_ipv6->saddr));
    memcpy(&hdr->extended_hdr.parsed_pkt.tunnel.tunneled_ip_dst.v6, &tunneled_ipv6->daddr, sizeof(tunneled_ipv6->daddr));

    ip_len = sizeof(struct kcompact_ipv6_hdr);

    /* Walk the IPv6 extension-header chain until a transport protocol is
       reached. Note: NEXTHDR_AUTH, NEXTHDR_ESP, NEXTHDR_IPV6,
       NEXTHDR_MOBILITY are not handled */
    while (hdr->extended_hdr.parsed_pkt.tunnel.tunneled_proto == NEXTHDR_HOP ||
           hdr->extended_hdr.parsed_pkt.tunnel.tunneled_proto == NEXTHDR_DEST ||
           hdr->extended_hdr.parsed_pkt.tunnel.tunneled_proto == NEXTHDR_ROUTING ||
           hdr->extended_hdr.parsed_pkt.tunnel.tunneled_proto == NEXTHDR_FRAGMENT) {
      struct kcompact_ipv6_opt_hdr *ipv6_opt;

      if (hdr->caplen < tunnel_offset + ip_len + sizeof(struct kcompact_ipv6_opt_hdr))
        return 1; /* inner L3 parsed, inner L4 unreachable */

      ipv6_opt = (struct kcompact_ipv6_opt_hdr *)(&pkt[tunnel_offset + ip_len]);
      ip_len += sizeof(struct kcompact_ipv6_opt_hdr);
      fragment_offset = 0;
      if (hdr->extended_hdr.parsed_pkt.tunnel.tunneled_proto == NEXTHDR_HOP ||
          hdr->extended_hdr.parsed_pkt.tunnel.tunneled_proto == NEXTHDR_DEST ||
          hdr->extended_hdr.parsed_pkt.tunnel.tunneled_proto == NEXTHDR_ROUTING)
        ip_len += ipv6_opt->hdrlen * 8; /* hdrlen counts 8-byte units beyond the first 8 bytes */

      hdr->extended_hdr.parsed_pkt.tunnel.tunneled_proto = ipv6_opt->nexthdr;
    }

    /* Normalize "no next header" to 0 so callers see a single sentinel. */
    if (hdr->extended_hdr.parsed_pkt.tunnel.tunneled_proto == NEXTHDR_NONE)
      hdr->extended_hdr.parsed_pkt.tunnel.tunneled_proto = 0;

    tunnel_offset += ip_len;

  } else
    return 0; /* unknown inner IP version */

  if (fragment_offset)
    return 1; /* non-first fragment: no L4 header present in this packet */

  if(hdr->extended_hdr.parsed_pkt.tunnel.tunneled_proto == IPPROTO_TCP) {
    struct tcphdr *tcp;

    if(hdr->caplen < tunnel_offset + sizeof(struct tcphdr))
      return 1;

    tcp = (struct tcphdr *)(&pkt[tunnel_offset]);

    hdr->extended_hdr.parsed_pkt.tunnel.tunneled_l4_src_port = ntohs(tcp->source),
      hdr->extended_hdr.parsed_pkt.tunnel.tunneled_l4_dst_port = ntohs(tcp->dest);
  } else if(hdr->extended_hdr.parsed_pkt.tunnel.tunneled_proto == IPPROTO_UDP) {
    struct udphdr *udp;

    if(hdr->caplen < tunnel_offset + sizeof(struct udphdr))
      return 1;

    udp = (struct udphdr *)(&pkt[tunnel_offset]);

    hdr->extended_hdr.parsed_pkt.tunnel.tunneled_l4_src_port = ntohs(udp->source),
      hdr->extended_hdr.parsed_pkt.tunnel.tunneled_l4_dst_port = ntohs(udp->dest);
  }

  return 2; /* inner L3 and L4 both parsed */
}


/*
 * Parse an Ethernet frame into hdr->extended_hdr.parsed_pkt.
 * level selects how deep to parse (2 = L2 only .. 4 = transport,
 * 5 = also look inside GTP/GRE tunnels); add_timestamp/add_hash
 * optionally fill hdr->ts and hdr->extended_hdr.pkt_hash.
 * Returns the deepest layer successfully analyzed.
 */
int fastnetmon_parse_pkt(unsigned char *pkt, struct pfring_pkthdr *hdr, u_int8_t level /* L2..L4, 5 (tunnel) */,
                         u_int8_t add_timestamp /* 0,1 */, u_int8_t add_hash /* 0,1 */) {
  struct ethhdr *eh = (struct ethhdr*) pkt;
  u_int32_t displ = 0, ip_len;
  u_int16_t analyzed = 0, fragment_offset = 0;

  hdr->extended_hdr.parsed_pkt.tunnel.tunnel_id = NO_TUNNEL_ID;

  /* Note: in order to optimize the computation, this function expects a zero-ed
   * or partially parsed pkthdr */
  //memset(&hdr->extended_hdr.parsed_pkt, 0, sizeof(struct pkt_parsing_info));
  //hdr->extended_hdr.parsed_header_len = 0;

  /* A non-zero l3_offset means L2 was already parsed on a previous call. */
  if (hdr->extended_hdr.parsed_pkt.offset.l3_offset != 0)
    goto L3;

  memcpy(&hdr->extended_hdr.parsed_pkt.dmac, eh->h_dest, sizeof(eh->h_dest));
  memcpy(&hdr->extended_hdr.parsed_pkt.smac, eh->h_source, sizeof(eh->h_source));

  hdr->extended_hdr.parsed_pkt.eth_type = ntohs(eh->h_proto);
  /* (assignment continues on the next mangled source line with "0;") */
  hdr->extended_hdr.parsed_pkt.offset.eth_offset =
0; + hdr->extended_hdr.parsed_pkt.offset.vlan_offset = 0; + hdr->extended_hdr.parsed_pkt.vlan_id = 0; /* Any VLAN */ + + if (hdr->extended_hdr.parsed_pkt.eth_type == 0x8100 /* 802.1q (VLAN) */) { + struct eth_vlan_hdr *vh; + hdr->extended_hdr.parsed_pkt.offset.vlan_offset = sizeof(struct ethhdr) - sizeof(struct eth_vlan_hdr); + while (hdr->extended_hdr.parsed_pkt.eth_type == 0x8100 /* 802.1q (VLAN) */ ) { + hdr->extended_hdr.parsed_pkt.offset.vlan_offset += sizeof(struct eth_vlan_hdr); + vh = (struct eth_vlan_hdr *) &pkt[hdr->extended_hdr.parsed_pkt.offset.vlan_offset]; + hdr->extended_hdr.parsed_pkt.vlan_id = ntohs(vh->h_vlan_id) & 0x0fff; + hdr->extended_hdr.parsed_pkt.eth_type = ntohs(vh->h_proto); + displ += sizeof(struct eth_vlan_hdr); + } + } + + hdr->extended_hdr.parsed_pkt.offset.l3_offset = hdr->extended_hdr.parsed_pkt.offset.eth_offset + displ + sizeof(struct ethhdr); + + L3: + + analyzed = 2; + + if (level < 3) + goto TIMESTAMP; + + if (hdr->extended_hdr.parsed_pkt.offset.l4_offset != 0) + goto L4; + + if (hdr->extended_hdr.parsed_pkt.eth_type == 0x0800 /* IPv4 */) { + struct iphdr *ip; + + hdr->extended_hdr.parsed_pkt.ip_version = 4; + + if (hdr->caplen < hdr->extended_hdr.parsed_pkt.offset.l3_offset + sizeof(struct iphdr)) + goto TIMESTAMP; + + ip = (struct iphdr *)(&pkt[hdr->extended_hdr.parsed_pkt.offset.l3_offset]); + + hdr->extended_hdr.parsed_pkt.ipv4_src = ntohl(ip->saddr); + hdr->extended_hdr.parsed_pkt.ipv4_dst = ntohl(ip->daddr); + hdr->extended_hdr.parsed_pkt.l3_proto = ip->protocol; + hdr->extended_hdr.parsed_pkt.ipv4_tos = ip->tos; + fragment_offset = ip->frag_off & htons(IP_OFFSET); /* fragment, but not the first */ + ip_len = ip->ihl*4; + + } else if (hdr->extended_hdr.parsed_pkt.eth_type == 0x86DD /* IPv6 */) { + struct kcompact_ipv6_hdr *ipv6; + + hdr->extended_hdr.parsed_pkt.ip_version = 6; + + if (hdr->caplen < hdr->extended_hdr.parsed_pkt.offset.l3_offset + sizeof(struct kcompact_ipv6_hdr)) + goto TIMESTAMP; + + ipv6 = (struct 
kcompact_ipv6_hdr*)(&pkt[hdr->extended_hdr.parsed_pkt.offset.l3_offset]); + ip_len = sizeof(struct kcompact_ipv6_hdr); + + /* Values of IPv6 addresses are stored as network byte order */ + memcpy(&hdr->extended_hdr.parsed_pkt.ipv6_src, &ipv6->saddr, sizeof(ipv6->saddr)); + memcpy(&hdr->extended_hdr.parsed_pkt.ipv6_dst, &ipv6->daddr, sizeof(ipv6->daddr)); + + hdr->extended_hdr.parsed_pkt.l3_proto = ipv6->nexthdr; + hdr->extended_hdr.parsed_pkt.ipv6_tos = ipv6->priority; /* IPv6 class of service */ + + /* Note: NEXTHDR_AUTH, NEXTHDR_ESP, NEXTHDR_IPV6, NEXTHDR_MOBILITY are not handled */ + while (hdr->extended_hdr.parsed_pkt.l3_proto == NEXTHDR_HOP || + hdr->extended_hdr.parsed_pkt.l3_proto == NEXTHDR_DEST || + hdr->extended_hdr.parsed_pkt.l3_proto == NEXTHDR_ROUTING || + hdr->extended_hdr.parsed_pkt.l3_proto == NEXTHDR_FRAGMENT) { + struct kcompact_ipv6_opt_hdr *ipv6_opt; + + if(hdr->caplen < hdr->extended_hdr.parsed_pkt.offset.l3_offset + ip_len + sizeof(struct kcompact_ipv6_opt_hdr)) + goto TIMESTAMP; + + ipv6_opt = (struct kcompact_ipv6_opt_hdr *)(&pkt[hdr->extended_hdr.parsed_pkt.offset.l3_offset + ip_len]); + ip_len += sizeof(struct kcompact_ipv6_opt_hdr); + if (hdr->extended_hdr.parsed_pkt.l3_proto == NEXTHDR_HOP || + hdr->extended_hdr.parsed_pkt.l3_proto == NEXTHDR_DEST || + hdr->extended_hdr.parsed_pkt.l3_proto == NEXTHDR_ROUTING) + ip_len += ipv6_opt->hdrlen * 8; + + hdr->extended_hdr.parsed_pkt.l3_proto = ipv6_opt->nexthdr; + } + + if (hdr->extended_hdr.parsed_pkt.l3_proto == NEXTHDR_NONE) + hdr->extended_hdr.parsed_pkt.l3_proto = 0; + } else { + hdr->extended_hdr.parsed_pkt.l3_proto = 0; + goto TIMESTAMP; + } + + hdr->extended_hdr.parsed_pkt.offset.l4_offset = hdr->extended_hdr.parsed_pkt.offset.l3_offset + ip_len; + + L4: + + analyzed = 3; + + if (level < 4 || fragment_offset) + goto TIMESTAMP; + + if(hdr->extended_hdr.parsed_pkt.l3_proto == IPPROTO_TCP) { + struct tcphdr *tcp; + + if(hdr->caplen < hdr->extended_hdr.parsed_pkt.offset.l4_offset + 
sizeof(struct tcphdr)) + goto TIMESTAMP; + + tcp = (struct tcphdr *)(&pkt[hdr->extended_hdr.parsed_pkt.offset.l4_offset]); + + hdr->extended_hdr.parsed_pkt.l4_src_port = ntohs(tcp->source); + hdr->extended_hdr.parsed_pkt.l4_dst_port = ntohs(tcp->dest); + hdr->extended_hdr.parsed_pkt.offset.payload_offset = hdr->extended_hdr.parsed_pkt.offset.l4_offset + (tcp->doff * 4); + hdr->extended_hdr.parsed_pkt.tcp.seq_num = ntohl(tcp->seq); + hdr->extended_hdr.parsed_pkt.tcp.ack_num = ntohl(tcp->ack_seq); + hdr->extended_hdr.parsed_pkt.tcp.flags = (tcp->fin * TH_FIN_MULTIPLIER) + (tcp->syn * TH_SYN_MULTIPLIER) + + (tcp->rst * TH_RST_MULTIPLIER) + (tcp->psh * TH_PUSH_MULTIPLIER) + + (tcp->ack * TH_ACK_MULTIPLIER) + (tcp->urg * TH_URG_MULTIPLIER); + + analyzed = 4; + } else if(hdr->extended_hdr.parsed_pkt.l3_proto == IPPROTO_UDP) { + struct udphdr *udp; + + if(hdr->caplen < hdr->extended_hdr.parsed_pkt.offset.l4_offset + sizeof(struct udphdr)) + goto TIMESTAMP; + + udp = (struct udphdr *)(&pkt[hdr->extended_hdr.parsed_pkt.offset.l4_offset]); + + hdr->extended_hdr.parsed_pkt.l4_src_port = ntohs(udp->source), hdr->extended_hdr.parsed_pkt.l4_dst_port = ntohs(udp->dest); + hdr->extended_hdr.parsed_pkt.offset.payload_offset = hdr->extended_hdr.parsed_pkt.offset.l4_offset + sizeof(struct udphdr); + + analyzed = 4; + + if (level < 5) + goto TIMESTAMP; + + /* GTPv1 */ + if((hdr->extended_hdr.parsed_pkt.l4_src_port == GTP_SIGNALING_PORT) || + (hdr->extended_hdr.parsed_pkt.l4_dst_port == GTP_SIGNALING_PORT) || + (hdr->extended_hdr.parsed_pkt.l4_src_port == GTP_U_DATA_PORT) || + (hdr->extended_hdr.parsed_pkt.l4_dst_port == GTP_U_DATA_PORT)) { + struct gtp_v1_hdr *gtp; + u_int16_t gtp_len; + + if(hdr->caplen < (hdr->extended_hdr.parsed_pkt.offset.payload_offset+sizeof(struct gtp_v1_hdr))) + goto TIMESTAMP; + + gtp = (struct gtp_v1_hdr *) (&pkt[hdr->extended_hdr.parsed_pkt.offset.payload_offset]); + gtp_len = sizeof(struct gtp_v1_hdr); + + if(((gtp->flags & GTP_FLAGS_VERSION) >> 
GTP_FLAGS_VERSION_SHIFT) == GTP_VERSION_1) { + struct iphdr *tunneled_ip; + + hdr->extended_hdr.parsed_pkt.tunnel.tunnel_id = ntohl(gtp->teid); + + if((hdr->extended_hdr.parsed_pkt.l4_src_port == GTP_U_DATA_PORT) || + (hdr->extended_hdr.parsed_pkt.l4_dst_port == GTP_U_DATA_PORT)) { + if(gtp->flags & (GTP_FLAGS_EXTENSION | GTP_FLAGS_SEQ_NUM | GTP_FLAGS_NPDU_NUM)) { + struct gtp_v1_opt_hdr *gtpopt; + + if(hdr->caplen < (hdr->extended_hdr.parsed_pkt.offset.payload_offset+gtp_len+sizeof(struct gtp_v1_opt_hdr))) + goto TIMESTAMP; + + gtpopt = (struct gtp_v1_opt_hdr *) (&pkt[hdr->extended_hdr.parsed_pkt.offset.payload_offset + gtp_len]); + gtp_len += sizeof(struct gtp_v1_opt_hdr); + + if((gtp->flags & GTP_FLAGS_EXTENSION) && gtpopt->next_ext_hdr) { + struct gtp_v1_ext_hdr *gtpext; + u_int8_t *next_ext_hdr; + + do { + if(hdr->caplen < (hdr->extended_hdr.parsed_pkt.offset.payload_offset+gtp_len +1/* 8bit len field */)) goto TIMESTAMP; + gtpext = (struct gtp_v1_ext_hdr *) (&pkt[hdr->extended_hdr.parsed_pkt.offset.payload_offset + gtp_len]); + gtp_len += (gtpext->len * GTP_EXT_HDR_LEN_UNIT_BYTES); + if(gtpext->len == 0 || hdr->caplen < (hdr->extended_hdr.parsed_pkt.offset.payload_offset+gtp_len)) goto TIMESTAMP; + next_ext_hdr = (u_int8_t *) (&pkt[hdr->extended_hdr.parsed_pkt.offset.payload_offset + gtp_len - 1/* 8bit next_ext_hdr field*/]); + } while (*next_ext_hdr); + } + } + + if(hdr->caplen < (hdr->extended_hdr.parsed_pkt.offset.payload_offset + gtp_len + sizeof(struct iphdr))) + goto TIMESTAMP; + + tunneled_ip = (struct iphdr *) (&pkt[hdr->extended_hdr.parsed_pkt.offset.payload_offset + gtp_len]); + + analyzed += __pfring_parse_tunneled_pkt(pkt, hdr, tunneled_ip->version, hdr->extended_hdr.parsed_pkt.offset.payload_offset + gtp_len); + } + } + } + } else if(hdr->extended_hdr.parsed_pkt.l3_proto == IPPROTO_GRE /* 0x47 */) { + struct gre_header *gre = (struct gre_header*)(&pkt[hdr->extended_hdr.parsed_pkt.offset.l4_offset]); + int gre_offset; + + gre->flags_and_version = 
ntohs(gre->flags_and_version); + gre->proto = ntohs(gre->proto); + + gre_offset = sizeof(struct gre_header); + + if((gre->flags_and_version & GRE_HEADER_VERSION) == 0) { + if(gre->flags_and_version & (GRE_HEADER_CHECKSUM | GRE_HEADER_ROUTING)) gre_offset += 4; + if(gre->flags_and_version & GRE_HEADER_KEY) { + u_int32_t *tunnel_id = (u_int32_t*)(&pkt[hdr->extended_hdr.parsed_pkt.offset.l4_offset+gre_offset]); + gre_offset += 4; + hdr->extended_hdr.parsed_pkt.tunnel.tunnel_id = ntohl(*tunnel_id); + } + if(gre->flags_and_version & GRE_HEADER_SEQ_NUM) gre_offset += 4; + + hdr->extended_hdr.parsed_pkt.offset.payload_offset = hdr->extended_hdr.parsed_pkt.offset.l4_offset + gre_offset; + + analyzed = 4; + + if (level < 5) + goto TIMESTAMP; + + if (gre->proto == ETH_P_IP /* IPv4 */ || gre->proto == ETH_P_IPV6 /* IPv6 */) + analyzed += __pfring_parse_tunneled_pkt(pkt, hdr, gre->proto == ETH_P_IP ? 4 : 6, hdr->extended_hdr.parsed_pkt.offset.payload_offset); + + } else { /* TODO handle other GRE versions */ + hdr->extended_hdr.parsed_pkt.offset.payload_offset = hdr->extended_hdr.parsed_pkt.offset.l4_offset; + } + } else { + hdr->extended_hdr.parsed_pkt.offset.payload_offset = hdr->extended_hdr.parsed_pkt.offset.l4_offset; + hdr->extended_hdr.parsed_pkt.l4_src_port = hdr->extended_hdr.parsed_pkt.l4_dst_port = 0; + } + +TIMESTAMP: + + if(add_timestamp && hdr->ts.tv_sec == 0) + gettimeofday(&hdr->ts, NULL); /* TODO What about using clock_gettime(CLOCK_REALTIME, ts) ? 
 */

  /* Optionally compute the flow hash once (0 means "not yet computed"). */
  if (add_hash && hdr->extended_hdr.pkt_hash == 0)
    hdr->extended_hdr.pkt_hash = pfring_hash_pkt(hdr);

  return analyzed;
}

/*
 * Render the already-parsed header h (and, for ARP, raw bytes from p) into
 * buff as a one-line human-readable summary. Returns the number of
 * characters written (snprintf-style accumulation).
 */
int fastnetmon_print_parsed_pkt(char *buff, u_int buff_len, const u_char *p, const struct pfring_pkthdr *h) {
  char buf1[32], buf2[32];
  int buff_used = 0;

  buff_used += snprintf(&buff[buff_used], buff_len - buff_used,
			"[%s -> %s] ",
			etheraddr2string(h->extended_hdr.parsed_pkt.smac, buf1),
			etheraddr2string(h->extended_hdr.parsed_pkt.dmac, buf2));

  if(h->extended_hdr.parsed_pkt.offset.vlan_offset)
    buff_used += snprintf(&buff[buff_used], buff_len - buff_used,
			  "[vlan %u] ", h->extended_hdr.parsed_pkt.vlan_id);

  if (h->extended_hdr.parsed_pkt.eth_type == 0x0800 /* IPv4*/ || h->extended_hdr.parsed_pkt.eth_type == 0x86DD /* IPv6*/) {

    if(h->extended_hdr.parsed_pkt.eth_type == 0x0800 /* IPv4*/ ) {
      buff_used += snprintf(&buff[buff_used], buff_len - buff_used,
			    "[IPv4][%s:%d ", intoa(h->extended_hdr.parsed_pkt.ipv4_src), h->extended_hdr.parsed_pkt.l4_src_port);
      buff_used += snprintf(&buff[buff_used], buff_len - buff_used,
			    "-> %s:%d] ", intoa(h->extended_hdr.parsed_pkt.ipv4_dst), h->extended_hdr.parsed_pkt.l4_dst_port);
    } else {
      buff_used += snprintf(&buff[buff_used], buff_len - buff_used,
			    "[IPv6][%s:%d ", in6toa(h->extended_hdr.parsed_pkt.ipv6_src), h->extended_hdr.parsed_pkt.l4_src_port);
      buff_used += snprintf(&buff[buff_used], buff_len - buff_used,
			    "-> %s:%d] ", in6toa(h->extended_hdr.parsed_pkt.ipv6_dst), h->extended_hdr.parsed_pkt.l4_dst_port);
    }

    buff_used += snprintf(&buff[buff_used], buff_len - buff_used,
			  "[l3_proto=%s]", proto2str(h->extended_hdr.parsed_pkt.l3_proto));

    /* If a GTP/GRE tunnel was recognized, also print the inner flow. */
    if(h->extended_hdr.parsed_pkt.tunnel.tunnel_id != NO_TUNNEL_ID) {
      buff_used += snprintf(&buff[buff_used], buff_len - buff_used,
			    "[TEID=0x%08X][tunneled_proto=%s]",
			    h->extended_hdr.parsed_pkt.tunnel.tunnel_id,
			    proto2str(h->extended_hdr.parsed_pkt.tunnel.tunneled_proto));

      /* NOTE(review): the inner address family is chosen from the OUTER
         eth_type here; an IPv6-in-GRE-in-IPv4 packet would be printed with
         the IPv4 branch — verify against upstream PF_RING. */
      if(h->extended_hdr.parsed_pkt.eth_type == 0x0800 /* IPv4*/ ) {
	buff_used += snprintf(&buff[buff_used], buff_len - buff_used,
			      "[IPv4][%s:%d ",
			      intoa(h->extended_hdr.parsed_pkt.tunnel.tunneled_ip_src.v4),
			      h->extended_hdr.parsed_pkt.tunnel.tunneled_l4_src_port);
	buff_used += snprintf(&buff[buff_used], buff_len - buff_used,
			      "-> %s:%d] ",
			      intoa(h->extended_hdr.parsed_pkt.tunnel.tunneled_ip_dst.v4),
			      h->extended_hdr.parsed_pkt.tunnel.tunneled_l4_dst_port);
      } else {
	buff_used += snprintf(&buff[buff_used], buff_len - buff_used,
			      "[IPv6][%s:%d ",
			      in6toa(h->extended_hdr.parsed_pkt.tunnel.tunneled_ip_src.v6),
			      h->extended_hdr.parsed_pkt.tunnel.tunneled_l4_src_port);
	buff_used += snprintf(&buff[buff_used], buff_len - buff_used,
			      "-> %s:%d] ",
			      in6toa(h->extended_hdr.parsed_pkt.tunnel.tunneled_ip_dst.v6),
			      h->extended_hdr.parsed_pkt.tunnel.tunneled_l4_dst_port);
      }
    }

    buff_used += snprintf(&buff[buff_used], buff_len - buff_used,
			  "[hash=%u][tos=%d][tcp_seq_num=%u]",
			  h->extended_hdr.pkt_hash,
			  h->extended_hdr.parsed_pkt.ipv4_tos,
			  h->extended_hdr.parsed_pkt.tcp.seq_num);

  } else if(h->extended_hdr.parsed_pkt.eth_type == 0x0806 /* ARP */) {
    buff_used += snprintf(&buff[buff_used], buff_len - buff_used, "[ARP]");
    /* NOTE(review): this guard compares against the OUTPUT buffer length
       (buff_len), but the bytes read below come from the packet p; it looks
       like it should be checking h->caplen — confirm against upstream
       before changing. */
    if (buff_len >= h->extended_hdr.parsed_pkt.offset.l3_offset+30) {
      buff_used += snprintf(&buff[buff_used], buff_len - buff_used,
			    "[Sender=%s/%s]",
			    etheraddr2string(&p[h->extended_hdr.parsed_pkt.offset.l3_offset+8], buf1),
			    intoa(ntohl(*((u_int32_t *) &p[h->extended_hdr.parsed_pkt.offset.l3_offset+14]))));
      buff_used += snprintf(&buff[buff_used], buff_len - buff_used,
			    "[Target=%s/%s]",
			    etheraddr2string(&p[h->extended_hdr.parsed_pkt.offset.l3_offset+18], buf2),
			    intoa(ntohl(*((u_int32_t *) &p[h->extended_hdr.parsed_pkt.offset.l3_offset+24]))));
    }
  } else {
    buff_used += snprintf(&buff[buff_used], buff_len - buff_used,
			  "[eth_type=0x%04X]", h->extended_hdr.parsed_pkt.eth_type);
  }

  /* (statement continues on the next mangled source line) */
  buff_used +=
snprintf(&buff[buff_used], buff_len - buff_used, + " [caplen=%d][len=%d][parsed_header_len=%d][eth_offset=%d][l3_offset=%d][l4_offset=%d][payload_offset=%d]\n", + h->caplen, h->len, h->extended_hdr.parsed_header_len, + h->extended_hdr.parsed_pkt.offset.eth_offset, + h->extended_hdr.parsed_pkt.offset.l3_offset, + h->extended_hdr.parsed_pkt.offset.l4_offset, + h->extended_hdr.parsed_pkt.offset.payload_offset); + + return buff_used; +} + +char *etheraddr2string(const u_char *ep, char *buf) { + char *hex = "0123456789ABCDEF"; + u_int i, j; + char *cp; + + cp = buf; + if((j = *ep >> 4) != 0) + *cp++ = hex[j]; + else + *cp++ = '0'; + + *cp++ = hex[*ep++ & 0xf]; + + for(i = 5; (int)--i >= 0;) { + *cp++ = ':'; + if((j = *ep >> 4) != 0) + *cp++ = hex[j]; + else + *cp++ = '0'; + + *cp++ = hex[*ep++ & 0xf]; + } + + *cp = '\0'; + return (buf); +} + +char *intoa(unsigned int addr) { + static char buf[sizeof "ff:ff:ff:ff:ff:ff:255.255.255.255"]; + return(_intoa(addr, buf, sizeof(buf))); +} + +char *_intoa(unsigned int addr, char* buf, u_short bufLen) { + char *cp, *retStr; + u_int byte; + int n; + + cp = &buf[bufLen]; + *--cp = '\0'; + + n = 4; + do { + byte = addr & 0xff; + *--cp = byte % 10 + '0'; + byte /= 10; + if(byte > 0) { + *--cp = byte % 10 + '0'; + byte /= 10; + if(byte > 0) + *--cp = byte + '0'; + } + *--cp = '.'; + addr >>= 8; + } while (--n > 0); + + retStr = (char*)(cp+1); + + return(retStr); +} + +static char *in6toa(struct in6_addr addr6) { + static char buf[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"]; + char *ret = (char*)inet_ntop(AF_INET6, &addr6, buf, sizeof(buf)); + + if(ret == NULL) { + //printf("Internal error (&buff[buff_used]r too short)"); + buf[0] = '\0'; + } + + return(ret); +} + +char *proto2str(u_short proto) { + static char protoName[8]; + + switch(proto) { + case IPPROTO_TCP: return("TCP"); + case IPPROTO_UDP: return("UDP"); + case IPPROTO_ICMP: return("ICMP"); + case IPPROTO_GRE: return("GRE"); + default: + snprintf(protoName, 
sizeof(protoName), "%d", proto); + return(protoName); + } +} diff --git a/tests/fastnetmon_packet_parser.h b/tests/fastnetmon_packet_parser.h new file mode 100644 index 0000000..70ab41c --- /dev/null +++ b/tests/fastnetmon_packet_parser.h @@ -0,0 +1,98 @@ +#ifndef _PFRING_PACKET_PARSER_H +#define _PFRING_PACKET_PARSER_H + +#include +#include // in6_addr + +#define ETH_ALEN 6 + +/* + Note that as offsets *can* be negative, + please do not change them to unsigned +*/ +struct pkt_offset { + int16_t eth_offset; /* + This offset *must* be added to all offsets below + ONLY if you are inside the kernel (e.g. when you + code a pf_ring plugin). Ignore it in user-space. + */ + int16_t vlan_offset; + int16_t l3_offset; + int16_t l4_offset; + int16_t payload_offset; +}; + + +typedef union { + struct in6_addr v6; /* IPv6 src/dst IP addresses (Network byte order) */ + u_int32_t v4; /* IPv4 src/dst IP addresses */ +} ip_addr; + +/* GPRS Tunneling Protocol */ +typedef struct { + u_int32_t tunnel_id; /* GTP/GRE tunnelId or NO_TUNNEL_ID for no filtering */ + u_int8_t tunneled_proto; + ip_addr tunneled_ip_src, tunneled_ip_dst; + u_int16_t tunneled_l4_src_port, tunneled_l4_dst_port; +} tunnel_info; + +struct pkt_parsing_info { + /* Core fields (also used by NetFlow) */ + u_int8_t dmac[ETH_ALEN], smac[ETH_ALEN]; /* MAC src/dst addresses */ + u_int16_t eth_type; /* Ethernet type */ + u_int16_t vlan_id; /* VLAN Id or NO_VLAN */ + u_int8_t ip_version; + u_int8_t l3_proto, ip_tos; /* Layer 3 protocol/TOS */ + ip_addr ip_src, ip_dst; /* IPv4 src/dst IP addresses */ + u_int16_t l4_src_port, l4_dst_port; /* Layer 4 src/dst ports */ + struct { + u_int8_t flags; /* TCP flags (0 if not available) */ + u_int32_t seq_num, ack_num; /* TCP sequence number */ + } tcp; + + tunnel_info tunnel; + u_int16_t last_matched_plugin_id; /* If > 0 identifies a plugin to that matched the packet */ + u_int16_t last_matched_rule_id; /* If > 0 identifies a rule that matched the packet */ + struct pkt_offset offset; 
/* Offsets of L3/L4/payload elements */ +}; + +struct pfring_extended_pkthdr { + u_int64_t timestamp_ns; /* Packet timestamp at ns precision. Note that if your NIC supports + hardware timestamp, this is the place to read timestamp from */ +#define PKT_FLAGS_CHECKSUM_OFFLOAD 1 << 0 /* IP/TCP checksum offload enabled */ +#define PKT_FLAGS_CHECKSUM_OK 1 << 1 /* Valid checksum (with IP/TCP checksum offload enabled) */ +#define PKT_FLAGS_IP_MORE_FRAG 1 << 2 /* IP More fragments flag set */ +#define PKT_FLAGS_IP_FRAG_OFFSET 1 << 3 /* IP fragment offset set (not 0) */ +#define PKT_FLAGS_VLAN_HWACCEL 1 << 4 /* VLAN stripped by hw */ + u_int32_t flags; + /* --- short header ends here --- */ + u_int8_t rx_direction; /* 1=RX: packet received by the NIC, 0=TX: packet transmitted by the NIC */ + int32_t if_index; /* index of the interface on which the packet has been received. + It can be also used to report other information */ + u_int32_t pkt_hash; /* Hash based on the packet header */ + struct { + int bounce_interface; /* Interface Id where this packet will bounce after processing + if its values is other than UNKNOWN_INTERFACE */ + struct sk_buff *reserved; /* Kernel only pointer */ + } tx; + u_int16_t parsed_header_len; /* Extra parsing data before packet */ + + /* NOTE: leave it as last field of the memset on parse_pkt() will fail */ + struct pkt_parsing_info parsed_pkt; /* packet parsing info */ +}; + + +/* NOTE: Keep 'struct pfring_pkthdr' in sync with 'struct pcap_pkthdr' */ +struct pfring_pkthdr { + /* pcap header */ + struct timeval ts; /* time stamp */ + u_int32_t caplen; /* length of portion present */ + u_int32_t len; /* length of whole packet (off wire) */ + struct pfring_extended_pkthdr extended_hdr; /* PF_RING extended header */ +}; + +// Prototypes +int fastnetmon_print_parsed_pkt(char *buff, u_int buff_len, const u_char *p, const struct pfring_pkthdr *h); +int fastnetmon_parse_pkt(unsigned char *pkt, struct pfring_pkthdr *hdr, u_int8_t level /* L2..L4, 5 
(tunnel) */,u_int8_t add_timestamp /* 0,1 */, u_int8_t add_hash /* 0,1 */); + +#endif diff --git a/tests/netmap.cpp b/tests/netmap.cpp index 9b17e01..cc812b6 100644 --- a/tests/netmap.cpp +++ b/tests/netmap.cpp @@ -4,34 +4,63 @@ #define NETMAP_WITH_LIBS #include - #include // For pooling operations #include -/* - How to compile - - FreeBSD: - clang++ netmap.cpp -I /usr/local/include -L/usr/local/lib -lboost_thread -lboost_system - - Linux: - g++ netmap.cpp -I/usr/src/fastnetmon/tests/netmap_includes -lboost_thread -lboost_system -*/ +#include "fastnetmon_packet_parser.h" int number_of_packets = 0; +/* prototypes */ +void netmap_thread(struct nm_desc* netmap_descriptor, int netmap_thread); +void consume_pkt(u_char* buffer, int len); + +int receive_packets(struct netmap_ring *ring) { + u_int cur, rx, n; + + cur = ring->cur; + n = nm_ring_space(ring); + + for (rx = 0; rx < n; rx++) { + struct netmap_slot *slot = &ring->slot[cur]; + char *p = NETMAP_BUF(ring, slot->buf_idx); + + // process data + consume_pkt((u_char*)p, slot->len); + + cur = nm_ring_next(ring, cur); + } + + ring->head = ring->cur = cur; + return (rx); +} + void consume_pkt(u_char* buffer, int len) { + //static char packet_data[2000]; //printf("Got packet with length: %d\n", len); + //memcpy(packet_data, buffer, len); + struct pfring_pkthdr l2tp_header; + memset(&l2tp_header, 0, sizeof(l2tp_header)); + l2tp_header.len = len; + l2tp_header.caplen = len; + + fastnetmon_parse_pkt((u_char*)buffer, &l2tp_header, 4, 1, 0); + + //char print_buffer[512]; + //fastnetmon_print_parsed_pkt(print_buffer, 512, (u_char*)buffer, &l2tp_header); + //printf("%s\n", print_buffer); + + __sync_fetch_and_add(&number_of_packets, 1); } void receiver(void) { struct nm_desc *netmap_descriptor; - struct pollfd fds; - struct nm_pkthdr h; - u_char* buf; + + u_int num_cpus = sysconf( _SC_NPROCESSORS_ONLN ); + printf("We have %d cpus\n", num_cpus); std::string interface = "netmap:eth4"; netmap_descriptor = nm_open(interface.c_str(), 
NULL, 0, 0); @@ -42,9 +71,66 @@ void receiver(void) { return; } - fds.fd = NETMAP_FD(netmap_descriptor); + printf("Mapped %dKB memory at %p\n", netmap_descriptor->req.nr_memsize>>10, netmap_descriptor->mem); + printf("We have %d tx and %d rx rings\n", netmap_descriptor->req.nr_tx_rings, netmap_descriptor->req.nr_rx_rings); + + /* + protocol stack and may cause a reset of the card, + which in turn may take some time for the PHY to + reconfigure. We do the open here to have time to reset. + */ + + int wait_link = 2; + printf("Wait %d seconds for NIC reset\n", wait_link); + sleep(wait_link); + + boost::thread* boost_threads_array[num_cpus]; + for (int i = 0; i < num_cpus; i++) { + struct nm_desc nmd = *netmap_descriptor; + uint64_t nmd_flags = 0; + + if (nmd.req.nr_flags != NR_REG_ALL_NIC) { + printf("SHIT SHIT SHIT HAPPINED\n"); + } + + nmd.req.nr_flags = NR_REG_ONE_NIC; + nmd.req.nr_ringid = i; + + /* Only touch one of the rings (rx is already ok) */ + nmd_flags |= NETMAP_NO_TX_POLL; + + struct nm_desc* new_nmd = nm_open(interface.c_str(), NULL, nmd_flags | NM_OPEN_IFNAME | NM_OPEN_NO_MMAP, &nmd); + + if (new_nmd == NULL) { + printf("Can't open netmap descripto for netmap\n"); + exit(1); + } + + printf("Start new thread %d\n", i); + // Start thread and pass netmap descriptor to it + boost_threads_array[i] = new boost::thread(netmap_thread, new_nmd, i); + } + + printf("Wait for thread finish\n"); + // Wait all threads for completion + for (int i = 0; i < num_cpus; i++) { + boost_threads_array[i]->join(); + } +} + +void netmap_thread(struct nm_desc* netmap_descriptor, int thread_number) { + struct nm_pkthdr h; + u_char* buf; + struct pollfd fds; + fds.fd = netmap_descriptor->fd;//NETMAP_FD(netmap_descriptor); fds.events = POLLIN; + struct netmap_ring *rxring = NULL; + struct netmap_if *nifp = netmap_descriptor->nifp; + + printf("Reading from fd %d thread id: %d\n", netmap_descriptor->fd, thread_number); + printf("I assume bug here! 
My first ring is %d and last ring id is %d I'm thread %d\n", netmap_descriptor->first_rx_ring, netmap_descriptor->last_rx_ring, thread_number); + for (;;) { // We will wait 1000 microseconds for retry, for infinite timeout please use -1 int poll_result = poll(&fds, 1, 1000); @@ -58,17 +144,27 @@ void receiver(void) { printf("poll failed with return code -1\n"); } - while ( (buf = nm_nextpkt(netmap_descriptor, &h)) ) { - consume_pkt(buf, h.len); + for (int i = netmap_descriptor->first_rx_ring; i <= netmap_descriptor->last_rx_ring; i++) { + //printf("Check ring %d from thread %d\n", i, thread_number); + rxring = NETMAP_RXRING(nifp, i); + + if (nm_ring_empty(rxring)) { + continue; + } + + int m = receive_packets(rxring); } + + //while ( (buf = nm_nextpkt(netmap_descriptor, &h)) ) { + // consume_pkt(buf, h.len); + //} } - nm_close(netmap_descriptor); + //nm_close(netmap_descriptor); } int main() { //receiver(); - boost::thread netmap_thread(receiver); for (;;) {