/* new_l2tp_k.c
 * Copyright 2004,2007 Benjamin LaHaise. All Rights Reserved.
 * Kernel side of a simple L2TP implementation for use with Babylon.
 *
 * Portions copied from net/ipv4/raw.c - Alan Cox, David S. Miller,
 * Ross Biro, Fred N. van Kempen
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define L2TP_HARDWIRE 1

#include "../include/bab_module.h"

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include
#endif
#include

#include "../include/aps_if.h"
#include "../include/l2tp_linux.h"
#include "l2tp_build.h"
#include "../include/vercomp.h"

struct workqueue_struct *l2tp_wq;

struct l2tp_info;
struct l2tp_tunnel;

struct l2tp_session {
	channel_t ch;
	u16 session_id;
	u16 peer_session_id;
	int recursion;
	struct l2tp_tunnel *tunnel;
#ifdef L2TP_HARDWIRE
	/* Used for hardwired outgoing */
	struct net_device *hw_ether_dev;
	u32 hw_src_ip;
	u32 hw_dst_ip;
	u16 hw_src_port;
	u16 hw_dst_port;
	u16 hw_peer_tunnel;
	u16 hw_peer_session;
	u8 hw_gw_mac[6];
#endif
	struct rcu_head rcu_head;
};

struct l2tp_tunnel {
	struct list_head list;
	atomic_t count;
	u16 tunnel_id;
	u16 peer_tunnel_id;
	struct sock *sk;
	struct socket *tx_socket;
	struct l2tp_info *l2tp;
	struct work_struct work;
	unsigned csum_payload;
#define L2TP_MAX_SESSIONS 65536
	/* sessions[0] is the tunnel control session */
	struct l2tp_session *sessions[L2TP_MAX_SESSIONS];
};

#ifdef __mips__
#define L2TP_MAX_TUNNELS 64
#else
#define L2TP_MAX_TUNNELS 65536
#endif
static struct l2tp_tunnel *l2tp_tunnels[L2TP_MAX_TUNNELS];
static unsigned next_l2tp_tunnel_id = 1;
static DEFINE_SPINLOCK(tunnels_lock);

struct l2tp_info {
	struct list_head tunnel_list;
	struct sock *sk;		/* used for l2tp packets to userspace */
	struct socket *rx_socket;
	struct sockaddr_l2tp l2tp_id;	/* session id */
};

static int num_tunnels, num_infos;	/* debug */

static struct kmem_cache *session_cachep;
static struct kmem_cache *l2tp_info_cachep;

int l2tp_alloc_tunnel_id(struct l2tp_tunnel *tunnel, int hint)
{
	u16 id = hint;
	int i;

	spin_lock(&tunnels_lock);
	if (hint <= 0)
		id = next_l2tp_tunnel_id++;

	for (i = 0; i < L2TP_MAX_TUNNELS; i++) {
		tunnel->tunnel_id = id++;
		id = id % L2TP_MAX_TUNNELS;
		if (!l2tp_tunnels[tunnel->tunnel_id]) {
			l2tp_tunnels[tunnel->tunnel_id] = tunnel;
			next_l2tp_tunnel_id = id % L2TP_MAX_TUNNELS;
			spin_unlock(&tunnels_lock);
			return 0;
		}
		if (hint) {
			pr_debug("unable to alloc requested l2tp tunnel id (0x%04x)\n",
				 hint);
			spin_unlock(&tunnels_lock);
			return -EBUSY;
		}
	}
	spin_unlock(&tunnels_lock);
	pr_debug("unable to alloc l2tp tunnel id\n");
	return -EBUSY;
}

static struct l2tp_tunnel *alloc_l2tp_tunnel(struct l2tp_info *l2tp, int hint)
{
	struct l2tp_tunnel *tunnel;

	tunnel = vmalloc(sizeof(*tunnel));
	if (!tunnel)
		return NULL;

	memset(tunnel, 0, sizeof(*tunnel));
	tunnel->l2tp = l2tp;
	atomic_set(&tunnel->count, 1);

	if (l2tp_alloc_tunnel_id(tunnel, hint)) {
		vfree(tunnel);
		return NULL;
	}

	list_add(&tunnel->list, &l2tp->tunnel_list);
	num_tunnels++;
	pr_debug("alloc'd l2tp tunnel[0x%04x]\n", tunnel->tunnel_id);
	return tunnel;
}

#define get_l2tp_tunnel(x)	(atomic_inc(&(x)->count))

static struct l2tp_tunnel *find_get_l2tp_tunnel(int hint)
{
	struct l2tp_tunnel *tunnel;

	if (hint <= 0 || hint >= L2TP_MAX_TUNNELS)
		return NULL;

	tunnel = l2tp_tunnels[hint];
	if (tunnel)
		get_l2tp_tunnel(tunnel);
	return tunnel;
}
static void free_l2tp_session(struct l2tp_tunnel *tunnel,
			      struct l2tp_session *session);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static void put_tunnel_work(void *data)
#else
static void put_tunnel_work(struct work_struct *work)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	struct work_struct *work = data;
#endif
	struct l2tp_tunnel *tunnel = container_of(work, struct l2tp_tunnel, work);
	int i;

	for (i = 0; i < 65536; i++) {
		if (tunnel->sessions[i])
			free_l2tp_session(tunnel, tunnel->sessions[i]);
	}
	vfree(tunnel);
	num_tunnels--;
}

static void put_l2tp_tunnel(struct l2tp_tunnel *tunnel)
{
	if (!atomic_dec_and_test(&tunnel->count))
		return;

	pr_debug("freeing l2tp_tunnel: %p\n", tunnel);
	l2tp_tunnels[tunnel->tunnel_id] = NULL;
	list_del(&tunnel->list);

	if (tunnel->tx_socket) {
		struct file *file = tunnel->tx_socket->file;
		tunnel->tx_socket = NULL;
		fput(file);	/* sockfd_put */
	}

	/* Do the final teardown of any outstanding interfaces asynchronously. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	INIT_WORK(&tunnel->work, put_tunnel_work, &tunnel->work);
#else
	INIT_WORK(&tunnel->work, put_tunnel_work);
#endif
	queue_work(l2tp_wq, &tunnel->work);
}

struct l2tp_info *alloc_l2tp_info(struct sock *sk)
{
	struct l2tp_info *l2tp;

	l2tp = kmem_cache_alloc(l2tp_info_cachep, GFP_KERNEL);
	if (!l2tp)
		return NULL;

	memset(l2tp, 0, sizeof(*l2tp));
	INIT_LIST_HEAD(&l2tp->tunnel_list);
	l2tp->sk = sk;
	num_infos++;
	return l2tp;
}

void free_l2tp_info(struct l2tp_info *l2tp)
{
	struct list_head *pos, *next;

	pr_debug("free_l2tp_info(%p)\n", l2tp);
again:
	list_for_each_safe(pos, next, &l2tp->tunnel_list) {
		struct l2tp_tunnel *tunnel = (struct l2tp_tunnel *)pos;
		pr_debug("free_l2tp_info: freeing tunnel %p\n", tunnel);
		put_l2tp_tunnel(tunnel);
		if (need_resched()) {
			schedule();
			goto again;
		}
	}
	kmem_cache_free(l2tp_info_cachep, l2tp);
	num_infos--;
}

static inline struct l2tp_info *sk_l2tp_info(struct sock *sk)
{
	return sk->sk_protinfo;
}

u32 skb_pull4(struct sk_buff *skb)
{
	u32 val;
	unsigned char *data = skb->data;

	if (!data)
		return ~0;
	val = data[0];
	val <<= 8;
	val |= data[1];
	val <<= 8;
	val |= data[2];
	val <<= 8;
	val |= data[3];
	if (!skb_pull(skb, 4))
		printk("skb_pull4: short\n");
	return val;
}

u16 skb_pull2(struct sk_buff *skb)
{
	u16 val;
	unsigned char *data = skb->data;

	val = data[0];
	val <<= 8;
	val |= data[1];
	if (!skb_pull(skb, 2))
		printk("skb_pull2: short\n");
	return val;
}

void l2tp_ReInput(channel_t *ch, struct sk_buff *skb)
{
	struct l2tp_session *session = (void *)ch;
	struct l2tp_tunnel *tunnel = session->tunnel;
	struct l2tp_info *l2tp = tunnel->l2tp;
	u8 *data;

	pr_debug("l2tp_ReInput(%p/%p)\n", skb, l2tp);
	data = skb_push(skb, 6);
	*data++ = 0x00;
	*data++ = 0x02;
	*data++ = tunnel->tunnel_id >> 8;
	*data++ = tunnel->tunnel_id;
	*data++ = session->session_id >> 8;
	*data++ = session->session_id;

	if (sock_queue_rcv_skb(l2tp->sk, skb)) {
		//printk("queue failed\n");
		kfree_skb(skb);
	}
}

static int l2tp_udp_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2tp_info *l2tp = sk->sk_user_data;
	struct l2tp_tunnel *tunnel;
	struct l2tp_session *session;
	u16 flags, len, tunnel_id, session_id, offset = 0, Ns = 0, Nr = 0;
	u16 *data;
	int pull_len;
	int err;

	skb_orphan(skb);
	skb->sk = NULL;
	skb->ip_summed = CHECKSUM_NONE;

	/* remove the UDP header from skb */
	skb_pull(skb, 8);

	/* Up to 12 bytes of L2TP headers must be accessible. */
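	/*
	 * For reference, the L2TP v2 header laid out per RFC 2661 that the
	 * code below walks (optional fields are present only when the
	 * corresponding flag bit is set):
	 *
	 *	flags/version	2 bytes	(T=control, L=length present,
	 *				 S=sequence, O=offset, P=priority,
	 *				 Ver must be 2)
	 *	length		2 bytes	(optional, L bit)
	 *	tunnel id	2 bytes
	 *	session id	2 bytes
	 *	Ns, Nr		2+2 bytes (optional, S bit)
	 *	offset size	2 bytes	(optional, O bit), then offset padding
	 *
	 * Flags through Nr is at most 12 bytes, which is what the
	 * pskb_may_pull() below guarantees is linear before parsing.
	 */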
	if (!pskb_may_pull(skb, 12))
		return 1;

	len = skb->len;
	data = (u16 *)skb->data;
	flags = ntohs(*data++);
	if (L2TPF_Ver2 != (flags & L2TPF_Ver)) {
		printk(KERN_INFO "l2tp: packet flags (0x%04x) not ver 2?\n", flags);
		goto discard;
	}

	if (flags & L2TPF_L)
		len = ntohs(*data++);
	else
		len = skb->len;

	if (skb->len < len) {
		printk("discarding short packet (l2tp = %d, skb->len = %d)\n",
		       len, skb->len);
		goto discard;
	}

	if (flags & L2TPF_T) {
		/* control packet -- pass on to userspace */
queue:
		err = sock_queue_rcv_skb(l2tp->sk, skb);
		if (err) {
			pr_debug("__l2tp_data_ready: queue_rcv_skb failed (%d)\n", err);
			goto discard;
		}
		return 1;
	}

	tunnel_id = ntohs(*data++);
	session_id = ntohs(*data++);
	pr_debug("l2tp data packet %d.%d\n", tunnel_id, session_id);

	/* lookup the tunnel -- does it exist? is it allowed on this socket? */
	tunnel = l2tp_tunnels[tunnel_id % L2TP_MAX_TUNNELS];
	if (!tunnel) {
		pr_debug("__l2tp_data_ready: no such tunnel 0x%04x\n", tunnel_id);
		goto queue;
	}

	if (!session_id) {
		pr_debug("__l2tp_data_ready: data packet for 0 session???\n");
		goto queue;
	}

	session = tunnel->sessions[session_id % L2TP_MAX_SESSIONS];
	if (!session) {
		pr_debug("__l2tp_data_ready: no such session 0x%04x.0x%04x\n",
			 tunnel_id, session_id);
		goto queue;
	}

	if (flags & L2TPF_S) {
		Ns = ntohs(*data++);
		Nr = ntohs(*data++);
	}

	if (flags & L2TPF_O)
		offset = ntohs(*data++);
	else
		offset = 0;

	pull_len = (unsigned char *)data - skb->data;
	pull_len += offset;
	skb_pull(skb, pull_len);

	pr_debug("flags = 0x%04x, len = 0x%04x, tunnel = 0x%04x, session = 0x%04x\n",
		 flags, len, tunnel_id, session_id);

	/* assuming we have a channel, receive the skb. */
	skb->dev = NULL;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
	dst_release(skb_dst(skb));
#endif
	skb_dst_set(skb, NULL);
#if defined(CONFIG_NF_CONNTRACK) || (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21) && defined(CONFIG_NETFILTER))
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
#ifdef CONFIG_NETFILTER_DEBUG
	skb->nf_debug = 0;
#endif
#endif
	ch_Input(&session->ch, skb);
	return 0;

discard:
	skb_free_datagram(sk, skb);
	return 1;
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
static int l2tp_encap_rcv(struct sock *sk, struct sk_buff *skb)
{
	l2tp_udp_rcv(sk, skb);
	return 0;
}
#endif
/* l2tp_data_ready
 *	Called with the UDP socket's lock held.
 */
static int __l2tp_data_ready(struct sock *sk)
{
	struct sk_buff *skb;
	int err;

	pr_debug("__l2tp_data_ready(%p)\n", sk);
	do {
		skb = skb_recv_datagram(sk, 0, 1, &err);
	} while ((NULL == skb) && (err != -EAGAIN));

	if (NULL == skb)
		return 0;

	return l2tp_udp_rcv(sk, skb);
}

static void l2tp_data_ready(struct sock *sk, int count)
{
	/* hook in case we need to loop for multiple packets */
	__l2tp_data_ready(sk);
}
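/*
 * For orientation, a rough sketch of how a userspace daemon is expected to
 * drive this socket family (field names come from sockaddr_l2tp in
 * l2tp_linux.h; the exact flow in the Babylon daemon may differ -- this is
 * illustrative only):
 *
 *	int udp_fd = socket(AF_INET, SOCK_DGRAM, 0);	// bound to port 1701
 *	int fd = socket(PF_L2TP, SOCK_DGRAM, 0);
 *
 *	struct sockaddr_l2tp sa = {
 *		.sl_family  = AF_L2TP,
 *		.sl_tunnel  = 0,	// 0 => allocate a new tunnel id
 *		.sl_session = 0,	// 0 => this is the tunnel control session
 *		.sl_rx_sfd  = udp_fd,	// UDP socket to receive L2TP on
 *		.sl_tx_sfd  = udp_fd,	// UDP socket to transmit on
 *	};
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 *	// on return sa.sl_tunnel holds the allocated tunnel id
 *
 *	// once the peer's tunnel id is known from the control exchange:
 *	sa.sl_peer_tunnel = peer_tunnel_id;
 *	connect(fd, (struct sockaddr *)&sa, sizeof(sa));
 *
 * Control packets (T bit set) and data for unknown sessions are queued to
 * this socket and read back with recvmsg(); data sessions are attached via
 * the BIOC_* ioctls further down.
 */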
int l2tp_connect(struct socket *sock, struct sockaddr *sa, int sockaddr_len,
		 int flags)
{
	struct l2tp_info *l2tp;
	struct l2tp_tunnel *tunnel;
	struct sockaddr_l2tp *sl;

	sl = (struct sockaddr_l2tp *)sa;
	if (sockaddr_len < sizeof(struct sockaddr_l2tp))
		return -EINVAL;
	if (!sock->sk)
		return -EINVAL;

	l2tp = sk_l2tp_info(sock->sk);
	if (!l2tp)
		return -EINVAL;

	tunnel = find_get_l2tp_tunnel(sl->sl_tunnel);
	if (!tunnel)
		return -EINVAL;

	if (tunnel->l2tp != l2tp) {
		printk("l2tp_connect: l2tp mismatch\n");
		put_l2tp_tunnel(tunnel);
		return -EINVAL;
	}

	if (!sl->sl_tunnel) {
		if (!sl->sl_peer_tunnel) {
			// Free the tunnel
			pr_debug("freeing l2tp tunnel %p\n", tunnel);
			if (l2tp_tunnels[sl->sl_tunnel] != tunnel) {
				printk("l2tp_connect: tunnel changed.\n");
				goto out_inval;
			}
			put_l2tp_tunnel(tunnel);
			put_l2tp_tunnel(tunnel);
			return 0;
		}
		printk("l2tp: no tunnel in connect\n");
		goto out_inval;
	}

	tunnel->peer_tunnel_id = sl->sl_peer_tunnel;
	put_l2tp_tunnel(tunnel);
	return 0;

out_inval:
	put_l2tp_tunnel(tunnel);
	return -EINVAL;
}

static int l2tp_bind(struct socket *sock, struct sockaddr *_sa, int sa_len)
{
	struct l2tp_tunnel *tunnel = NULL;
	struct sockaddr_l2tp *sa = (void *)_sa;
	struct sock *sk = sock->sk;
	struct l2tp_info *l2tp = sk_l2tp_info(sk);
	int err;

	if (sa_len < 0 || sa_len < sizeof(*sa))
		return -EINVAL;
	if (sa->sl_family != AF_L2TP)
		return -EINVAL;

	if (sa->sl_rx_sfd != -1 && sa->sl_session) {
		printk("l2tp_bind: cannot bind session(0x%04x) to rx_sfd(%d)\n",
		       sa->sl_session, sa->sl_rx_sfd);
		return -EINVAL;
	}
	if (sa->sl_tx_sfd != -1 && sa->sl_session) {
		printk("l2tp_bind: cannot bind session(0x%04x) to tx_sfd(%d)\n",
		       sa->sl_session, sa->sl_tx_sfd);
		return -EINVAL;
	}

	/* subtle semantics: new tunnels will only be created for session
	 * ids of 0, which is to say for a new control session.
	 */
	tunnel = find_get_l2tp_tunnel(ntohs(sa->sl_tunnel));
	if (!tunnel && !sa->sl_session)
		tunnel = alloc_l2tp_tunnel(l2tp, ntohs(sa->sl_tunnel));
	if (!tunnel) {
		printk("l2tp_bind: no tunnel\n");
		return -EBUSY;
	}
	sa->sl_tunnel = htons(tunnel->tunnel_id);

	if (sa->sl_rx_sfd != -1) {
		err = -EBUSY;
		if (SS_UNCONNECTED != sock->state)
			goto out_err;
		if (l2tp->rx_socket) {
			printk("attempt to bind busy l2tp to rx socket\n");
			goto out_err;
		}
		l2tp->rx_socket = sockfd_lookup(sa->sl_rx_sfd, &err);
		if (l2tp->rx_socket == NULL)
			goto out_err;
		pr_debug("cool: have a socket (%p)->sk (%p)->socket = %p.\n",
			 l2tp->rx_socket, l2tp->rx_socket->sk,
			 l2tp->rx_socket->sk ?
				l2tp->rx_socket->sk->sk_socket : (void *)-1);

		lock_sock(l2tp->rx_socket->sk);
		l2tp->rx_socket->sk->sk_user_data = l2tp;
		l2tp->rx_socket->sk->sk_data_ready = l2tp_data_ready;
		l2tp->rx_socket->sk->sk_sndbuf = 262144;
		l2tp->rx_socket->sk->sk_rcvbuf = 262144;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
		udp_sk(l2tp->rx_socket->sk)->encap_type = UDP_ENCAP_L2TPINUDP;
		udp_sk(l2tp->rx_socket->sk)->encap_rcv = l2tp_encap_rcv;
#endif
		release_sock(l2tp->rx_socket->sk);
		sock->state = SS_CONNECTED;
	}

	if (sa->sl_tx_sfd != -1) {
		err = -EBUSY;
		if (SS_CONNECTED != sock->state) {
			printk("attempt to bind tunnel tx without rx socket\n");
			goto out_err;
		}
		if (tunnel->tx_socket) {
			printk("attempt to bind busy tunnel to tx socket\n");
			goto out_err;
		}
		tunnel->tx_socket = sockfd_lookup(sa->sl_tx_sfd, &err);
		if (tunnel->tx_socket == NULL)
			goto out_err;
		pr_debug("cool: have a tx socket (%p)->sk (%p)->tx_socket = %p.\n",
			 tunnel->tx_socket, tunnel->tx_socket->sk,
			 tunnel->tx_socket->sk ?
				tunnel->tx_socket->sk->sk_socket : (void *)-1);
		tunnel->tx_socket->sk->sk_sndbuf = 262144;
		tunnel->tx_socket->sk->sk_rcvbuf = 262144;
	}

	if (SS_CONNECTED != sock->state) {
		printk("l2tp: post-sockets, not connected!\n");
		goto out_err;
	}

	err = -EINVAL;
	if (!sa->sl_session && !l2tp->rx_socket) {
		printk("no session no rx socket\n");
		goto out_err;
	}
	err = -EINVAL;
	if (sa->sl_session && !tunnel->tx_socket) {
		printk("have session no tx socket\n");
		goto out_err;
	}
	err = -EINVAL;
	if (sa->sl_session && l2tp->rx_socket) {
		printk("have session have socket\n");
		goto out_err;
	}
	err = -EBUSY;
	if (tunnel->sessions[sa->sl_session]) {
		printk("session 0x%04x in use\n", sa->sl_session);
		goto out_err;
	}

	memcpy(&l2tp->l2tp_id, sa, sizeof(l2tp->l2tp_id));
	pr_debug("l2tp_bind: okay. sock->sk = %p\n", sock->sk);
	return 0;

out_err:
	if (tunnel)
		put_l2tp_tunnel(tunnel);
	return err;
}

static int l2tp_release(struct socket *sock)
{
	struct l2tp_info *l2tp;
	struct sock *sk = sock->sk, *udp_sk = NULL;

	/* FIXME: release any bound sockets, any children */
	pr_debug("l2tp_release\n");
	if (!sk) {
		printk("l2tp_release: no sock?\n");
		return 0;
	}

	l2tp = sk_l2tp_info(sk);

	/* Lock the UDP socket to flush out any incoming packet rx. */
	if (l2tp && l2tp->rx_socket && (udp_sk = l2tp->rx_socket->sk)) {
		lock_sock(udp_sk);
		sock_set_flag(udp_sk, SOCK_DEAD);	/* prevent data_ready() */
	}

	sk->sk_protinfo = NULL;
	if (l2tp) {
		struct socket *rx_socket = l2tp->rx_socket;

		/* Lock the udp socket to flush out any in progress rx. */
		if (rx_socket && (udp_sk = rx_socket->sk))
			l2tp->rx_socket = NULL;
		free_l2tp_info(l2tp);
		if (NULL != rx_socket) {
			pr_debug("releasing rx_socket\n");
			release_sock(udp_sk);
			udp_sk = NULL;
			fput(rx_socket->file);	/* sockfd_put */
		}
	}
	sock->sk = NULL;
	if (udp_sk)
		release_sock(udp_sk);
	sk_free(sk);
	pr_debug("l2tp_release: done\n");
	return 0;
}
/*
 * ripped from ipv4/raw.c
 *
 * This should be easy, if there is something there
 * we return it, otherwise we block.
 */
int __l2tp_recvmsg(struct sock *sk, struct msghdr *msg, int len,
		   int noblock, int flags, int *addr_len)
{
	int copied = 0;
	int err = -EOPNOTSUPP;
	struct sockaddr_l2tp *saddr = (struct sockaddr_l2tp *)msg->msg_name;
	struct sk_buff *skb;

	if (flags & MSG_OOB)
		goto out;

	if (addr_len)
		*addr_len = sizeof(*saddr);

	if (flags & MSG_ERRQUEUE) {
		/*err = ip_recv_error(sk, msg, len);*/
		err = 0;	/* FIXME: pass on errors */
		goto out;
	}

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err)
		goto done;

	sock_recv_timestamp(msg, sk, skb);

	/* Copy the address. */
	if (saddr) {
		memset(saddr, 0, sizeof(*saddr));
		saddr->sl_family = AF_L2TP;
		/* FIXME: what's the address? */
	}

done:
	skb_free_datagram(sk, skb);
out:
	return err ? : copied;
}

/* ripped from af_inet.c */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
static int l2tp_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t size, int flags)
#else
static int l2tp_recvmsg(struct socket *sock, struct msghdr *msg, int size,
			int flags, struct scm_cookie *scm)
#endif
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

	err = __l2tp_recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
			     flags & ~MSG_DONTWAIT, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	//printk("l2tp_recvmsg: %d\n", err);
	return err;
}

static int l2tp_getname(struct socket *sock, struct sockaddr *sa, int *lenp,
			int peer)
{
	struct sock *sk = sock->sk;
	struct l2tp_info *l2tp = sk_l2tp_info(sk);

	if (peer)
		return -EINVAL;
	memcpy(sa, &l2tp->l2tp_id, sizeof(l2tp->l2tp_id));
	*lenp = sizeof(l2tp->l2tp_id);
	return 0;
}

void l2tp_bab_use(channel_t *ch)
{
	printk("l2tp_bab_use\n");
}

void l2tp_bab_unuse(channel_t *ch)
{
	printk("l2tp_bab_unuse\n");
}
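/*
 * The hardwired transmit path below builds the whole frame by hand in front
 * of the PPP payload.  For reference, the 48 bytes pushed on (offsets match
 * the literal constants used in l2tp_hardwire_output):
 *
 *	 0..13	Ethernet: dst MAC (hw_gw_mac), src MAC, ethertype 0x0800
 *	14..33	IPv4: ihl 5, ttl 0x40, protocol 17 (UDP), hw_src_ip/hw_dst_ip
 *	34..41	UDP: hw_src_port/hw_dst_port, length = payload + 6 + 8
 *	42..47	L2TP v2 data header: flags 0x0002, peer tunnel id, peer session id
 *	48..	PPP payload handed in by the channel layer
 */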
#ifdef L2TP_HARDWIRE
int l2tp_hardwire_output(struct l2tp_session *session, struct sk_buff *skb,
			 struct net_device *hw_ether_dev)
{
	u16 len = skb->len;
	u8 *data = skb_push(skb, 48);
	struct iphdr *iph;
	struct udphdr *uh;

	if (!data) {
		session->ch.stats.collisions++;
		dev_kfree_skb(skb);
		return 0;
	}

	/* ether header */
	memcpy(data + 0, session->hw_gw_mac, 6);	/* dst mac */
	memcpy(data + 6, hw_ether_dev->dev_addr, 6);	/* src mac */
	*(u16 *)&data[12] = htons(0x0800);		/* proto */

	/* ip hdr */
	data[14] = 0x45;				/* version(4) ihl(5) */
	data[15] = 0x00;				/* tos */
	*(u16 *)(data+16) = htons(len + 48 - 14);	/* tot_len */
	*(u16 *)(data+18) = htons(0x0000);		/* id */
	*(u16 *)(data+20) = htons(0x0000);		/* frag_off */
	data[22] = 0x40;				/* ttl */
	data[23] = 0x11;				/* protocol (UDP) */
	*(u16 *)(data+24) = htons(0x0000);		/* check */
	*(u32 *)(data+26) = session->hw_src_ip;		/* saddr */
	*(u32 *)(data+30) = session->hw_dst_ip;		/* daddr */
	iph = (struct iphdr *)(data+14);
	iph->check = ip_fast_csum(data + 14, 5);

	/* udp hdr */
	*(u16 *)(data+34) = session->hw_src_port;	/* source port */
	*(u16 *)(data+36) = session->hw_dst_port;	/* dest port */
	*(u16 *)(data+38) = htons(len + 6 + 8);		/* len + l2tp + udp */
	*(u16 *)(data+40) = htons(0x0000);		/* check */

	/* l2tp header */
	*(u16 *)(data+42) = htons(0x0002);		/* flags = data pkt, no seq */
	*(u16 *)(data+44) = session->hw_peer_tunnel;	/* peer tunnel id */
	*(u16 *)(data+46) = session->hw_peer_session;	/* peer session id */

	skb->dev = hw_ether_dev;
	skb_set_mac_header(skb, 0);
	skb_set_network_header(skb, 14);
	skb_set_transport_header(skb, 34);
	skb->protocol = __constant_htons(0x0800);

	if (session->tunnel->csum_payload) {
		skb->ip_summed = CHECKSUM_NONE;
		goto no_csum;
	}

	if (!(skb->dev->features & NETIF_F_IP_CSUM)) {
		skb->csum = skb_checksum(skb, /*skb_transport_offset*/34,
					 skb->len - 34, 0);
	} else
		skb->csum = 0;

	/* set up UDP checksum offload, only for the UDP+L2TP header */
	uh = (struct udphdr *)(data + 34);
	uh->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
				      len + 6 + 8/*48*/, IPPROTO_UDP, skb->csum);
	if (!(skb->dev->features & NETIF_F_IP_CSUM)) {
		if (uh->check == 0)
			uh->check = 0xffff;
		skb->ip_summed = CHECKSUM_NONE;
	} else {
		uh->check = ~uh->check;
#if defined(CHECKSUM_HW)	/* pre-2.6.19 */
		skb->ip_summed = CHECKSUM_HW;
		skb->csum = offsetof(struct udphdr, check);
#else
		skb->ip_summed = CHECKSUM_PARTIAL;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
		skb->csum_offset = offsetof(struct udphdr, check);
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
		skb->csum_start = 34;
#else
		skb->csum = offsetof(struct udphdr, check);
#endif
#endif
	}
no_csum:
	return dev_queue_xmit(skb);
}
#endif /* def L2TP_HARDWIRE */
int l2tp_bab_output(channel_t *ch, struct sk_buff *skb)
{
	struct l2tp_session *session = (void *)ch;
	struct sock *sk = session->tunnel->tx_socket->sk;
	struct inet_sock *inet = inet_sk(sk);
	u16 *data, sport, dport;
	int err;
	int new_headroom, old_headroom;
	int len = skb->len;
	int is_v6;
	u16 *csum_data;

	/* recursion idea taken from ip_gre */
	if (unlikely(session->recursion++) ||
	    unlikely(!session->tunnel->tx_socket))
		goto tx_error;

	pr_debug("l2tp_bab_output(%p)\n", skb);
	is_v6 = (session->tunnel->tx_socket->sk->sk_family == PF_INET6);

	old_headroom = skb_headroom(skb);
	new_headroom = NET_SKB_PAD / 4 +
		       (is_v6 ? sizeof(struct ipv6hdr) : sizeof(struct iphdr)) +
		       sizeof(struct udphdr) +
		       6 + 6 + 6 + 2;	/* src mac, dst mac, proto */
	pr_debug("l2tp_bab_output: old headroom = %d new headroom = %d\n",
		 old_headroom, new_headroom);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
	if (skb_cow_head(skb, new_headroom))
		goto tx_error;
#endif
	pr_debug("l2tp_bab_output: post new headroom = %d\n", skb_headroom(skb));
	if (skb_headroom(skb) < new_headroom ||
	    skb_cloned(skb) || skb_shared(skb)) {
		struct sk_buff *newskb = skb_realloc_headroom(skb, new_headroom);
		pr_debug("realloc'd\n");
		if (!newskb) {
			printk("l2tp_bab_output: skb_realloc_headroom failed\n");
			session->ch.stats.tx_dropped++;
			goto tx_error;
		}
		if (skb->sk)
			skb_set_owner_w(newskb, skb->sk);
		dev_kfree_skb(skb);
		skb = newskb;
	}

	skb_orphan(skb);
	skb->truesize += new_headroom - old_headroom;
	pr_debug("memset'd\n");
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	pr_debug("dst'd\n");
	dst_release(skb_dst(skb));
	skb_dst_set(skb, NULL);

	rcu_read_lock();
#ifdef L2TP_HARDWIRE
	{
		struct net_device *hw_ether_dev;

		hw_ether_dev = rcu_dereference(session->hw_ether_dev);
		if (hw_ether_dev) {
			err = l2tp_hardwire_output(session, skb, hw_ether_dev);
			session->recursion--;
			rcu_read_unlock();
			return err;
		}
	}
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0)
	skb_dst_set(skb, dst_clone(__sk_dst_get(session->tunnel->tx_socket->sk)));
#endif
	skb->ip_summed = CHECKSUM_NONE;
	pr_debug("l2tp_bab_output: len = %d\n", skb->len);
	data = (u16 *)skb_push(skb, 14);
	pr_debug("l2tp_bab_output: len = %d\n", skb->len);
	skb_reset_transport_header(skb);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33))
#define inet_sport	sport
#define inet_dport	dport
#endif
	sport = inet->inet_sport;
	dport = inet->inet_dport;
#else
	sport = session->tunnel->tx_socket->sk->sport;
	dport = session->tunnel->tx_socket->sk->dport;
#define ip_queue_xmit(x,y)	ip_queue_xmit(x)
#endif

	*data++ = sport;				/* udp source */
	*data++ = dport;				/* udp dest */
	*data++ = htons(skb->len);			/* udp len */
	csum_data = data;
	*data++ = htons(0x0000);			/* udp csum */
	*data++ = htons(0x0002);			/* l2tp flags: data, no seq */
	*data++ = htons(session->tunnel->peer_tunnel_id);
	*data++ = htons(session->peer_session_id);

	skb_set_owner_w(skb, session->tunnel->tx_socket->sk);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct udphdr, check);
#endif
	skb->ip_summed = CHECKSUM_NONE;
	if (!is_v6 && !session->tunnel->csum_payload) {
		csum_data = NULL;
	} else {
		unsigned offset = skb_transport_offset(skb);
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		printk("skb->csum = 0x%04x\n", skb->csum);
	}

	/* Make sure fragmentation is permitted */
	skb->local_df = 1;

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	if (is_v6) {
		struct ipv6_pinfo *np = inet6_sk(session->tunnel->tx_socket->sk);
#if 0
		struct flowi fl = {
			.proto = IPPROTO_UDP,
			.fl_ip_dport = dport,
			.fl_ip_sport = sport,
			.mark = session->tunnel->tx_socket->sk->sk_mark,
			.fl6_flowlabel = np->flow_label,
		};
		ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
		err = ip6_xmit(session->tunnel->tx_socket->sk, skb, &fl, NULL, 1);
#endif
		*csum_data = csum_ipv6_magic(&np->saddr, &np->daddr, skb->len,
					     IPPROTO_UDP, skb->csum);
		if (*csum_data == 0)
			*csum_data = CSUM_MANGLED_0;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)
		err = inet6_csk_xmit(skb, NULL);
#else
		err = inet6_csk_xmit(skb, 1);
#endif
	} else
#endif
	{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)
		struct flowi fl = {
			.u.ip4.fl4_sport = sport,
			.u.ip4.fl4_dport = dport,
		};
		struct rtable *rt;

		rt = ip_route_output_ports(sock_net(sk), &fl.u.ip4, sk,
					   inet->inet_daddr, inet->inet_saddr,
					   dport, sport, IPPROTO_UDP,
					   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
		if (IS_ERR(rt))
			goto tx_error;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)
		/* TODO: cache dst and use skb_dst_set_noref(skb, &rt->dst); */
#endif
		skb_dst_set(skb, &rt->dst);
		err = ip_queue_xmit(skb, &fl);
#else
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
#define ip_queue_xmit(x,y)	ip_queue_xmit(x)
#endif
		err = ip_queue_xmit(skb, 1);
#endif
	}
	rcu_read_unlock();

	if (err)
		printk("l2tp_bab_output: err = %d\n", err);
	else {
		session->ch.stats.tx_bytes += len;
		session->ch.stats.tx_packets++;
	}
	session->recursion--;
	return 0;

tx_error:
	rcu_read_unlock();
	pr_debug("tx_error\n");
	session->ch.stats.collisions++;
	dev_kfree_skb(skb);
	session->recursion--;
	pr_debug("tx_error done\n");
	return 0;
}

int l2tp_bab_connect(channel_t *ch, const char *num, u32 flags)
{
	printk("l2tp_bab_connect\n");
	return -EINVAL;
}

int l2tp_bab_hangup(channel_t *ch)
{
	struct l2tp_session *session = (void *)ch;
	struct l2tp_tunnel *tunnel = session->tunnel;
	struct l2tp_info *l2tp = tunnel->l2tp;

	if (!l2tp || !l2tp->rx_socket || !l2tp->rx_socket->sk) {
		printk(KERN_DEBUG "l2tp_bab_hangup: NULL!\n");
		return -EINVAL;
	}

	lock_sock(l2tp->rx_socket->sk);
	if (session->ch.state != CS_CONNECTED) {
		printk(KERN_DEBUG "l2tp_bab_hangup: channel state %d!\n",
		       session->ch.state);
		release_sock(l2tp->rx_socket->sk);
		return -EINVAL;
	}
	session->ch.state = CS_DISCONNECTING;
	free_l2tp_session(tunnel, session);
	release_sock(l2tp->rx_socket->sk);
	return 0;
}

#ifdef L2TP_HARDWIRE
struct l2tp_rcu_put_dev {
	struct rcu_head rcu_head;
	struct net_device *dev;
};

static void l2tp_rcu_put(struct rcu_head *rcu_head)
{
	struct l2tp_rcu_put_dev *put = (void *)rcu_head;

	dev_put(put->dev);
	kfree(put);
}
#endif
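/*
 * Userspace enables the hardwired fast path per session with
 * BIOC_L2TP_HARDWIRE_SESSION.  A rough sketch of the ioctl argument (the
 * struct comes from l2tp_linux.h; this is illustrative, not the Babylon
 * daemon's actual code).  l2tp_hardwire_output() copies the address, port
 * and id fields straight into the frame, so they must already be in
 * network byte order:
 *
 *	struct l2tp_hardwire_info info = {0};
 *	strcpy(info.dev_name, "eth0");		// egress device (example name)
 *	info.hw_src_ip = local_ip;		// __be32
 *	info.hw_dst_ip = peer_ip;		// __be32
 *	info.hw_src_port = htons(1701);
 *	info.hw_dst_port = htons(1701);
 *	info.hw_peer_tunnel = htons(peer_tunnel_id);
 *	info.hw_peer_session = htons(peer_session_id);
 *	memcpy(info.hw_gw_mac, gateway_mac, 6);	// next-hop MAC
 *	// passed as the arg member of struct l2tp_join_bundle through the
 *	// AF_L2TP socket's ioctl() -- see l2tp_join_bundle() below.
 *
 * An empty dev_name tears the hardwire down; the previous device reference
 * is dropped via call_rcu() so in-flight transmits remain safe.
 */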
int l2tp_bab_ioctl(channel_t *ch, unsigned int cmd, unsigned long arg)
{
#ifdef L2TP_HARDWIRE
	if (cmd == BIOC_L2TP_HARDWIRE_SESSION) {
		struct l2tp_hardwire_info info;
		struct l2tp_session *session = (void *)ch;
		struct net_device *dev = NULL, *old_dev;
		struct l2tp_rcu_put_dev *put;

		if (copy_from_user(&info, (void *)arg, sizeof(info)))
			return -EFAULT;

		/* FIXME: netns */
		if (info.dev_name[0]) {
			dev = dev_get_by_name(&init_net, info.dev_name);
			if (!dev)
				return -ENOENT;
		}

		put = kzalloc(sizeof(*put), GFP_KERNEL);
		if (!put) {
			if (dev)
				dev_put(dev);
			return -ENOMEM;
		}

		rcu_read_lock();
		spin_lock(&tunnels_lock);
		old_dev = rcu_dereference(session->hw_ether_dev);
		session->hw_src_ip = info.hw_src_ip;
		session->hw_dst_ip = info.hw_dst_ip;
		session->hw_src_port = info.hw_src_port;
		session->hw_dst_port = info.hw_dst_port;
		session->hw_peer_tunnel = info.hw_peer_tunnel;
		session->hw_peer_session = info.hw_peer_session;
		memcpy(session->hw_gw_mac, info.hw_gw_mac, 6);
		rcu_assign_pointer(session->hw_ether_dev, dev);
		spin_unlock(&tunnels_lock);
		rcu_read_unlock();

		INIT_RCU_HEAD(&put->rcu_head);
		if (old_dev) {
			put->dev = old_dev;
			call_rcu(&put->rcu_head, l2tp_rcu_put);
		} else
			kfree(put);
		return 0;
	}
#endif
	return -EINVAL;
}

static struct l2tp_session *setup_session(struct l2tp_tunnel *tunnel,
					  u16 session_id, u16 peer_session_id)
{
	struct l2tp_session *session;
	int err;

	pr_debug("setup_session\n");
	session = kmem_cache_alloc(session_cachep, GFP_KERNEL);
	if (!session)
		return NULL;

	memset(session, 0, sizeof(*session));
	snprintf(session->ch.device_name, sizeof(session->ch.device_name),
		 "l2tp%u.%u", tunnel->tunnel_id, session_id);
	strcpy(session->ch.dev_class, "l2tp");
	session->tunnel = tunnel;
	session->session_id = session_id;
	session->peer_session_id = peer_session_id;
	session->ch.mru = 1492;
	session->ch.use = l2tp_bab_use;
	session->ch.unuse = l2tp_bab_unuse;
	session->ch.Output = l2tp_bab_output;
	session->ch.Connect = l2tp_bab_connect;
	session->ch.Hangup = l2tp_bab_hangup;
	session->ch.ioctl = l2tp_bab_ioctl;
	session->ch.ReInput = l2tp_ReInput;
	set_busy(&session->ch);

	err = RegisterChannel(&session->ch);
	if (err) {
		printk("RegisterChannel: %d\n", err);
		kmem_cache_free(session_cachep, session);
		return NULL;
	}

	pr_debug("session setup!\n");
	clear_busy(&session->ch);
	session->ch.state = CS_CONNECTED;
	session->ch.Open(&session->ch);
	session->ch.Up(&session->ch);
	ch_ioctl(NULL, NULL, session->ch.link, BIOC_SETLCFL, BF_PPP);
	ch_ioctl(NULL, NULL, session->ch.link, BIOC_SETRCFL, BF_PPP);

	tunnel->sessions[session_id] = session;
	return session;
}

static void l2tp_session_rcu_put(struct rcu_head *rcu_head)
{
	struct l2tp_session *session;

	session = container_of(rcu_head, struct l2tp_session, rcu_head);
#ifdef L2TP_HARDWIRE
	if (session->hw_ether_dev)
		dev_put(session->hw_ether_dev);
#endif
	kmem_cache_free(session_cachep, session);
}

static void free_l2tp_session(struct l2tp_tunnel *tunnel,
			      struct l2tp_session *session)
{
	spin_lock(&tunnels_lock);
	if (tunnel->sessions[session->session_id] != session)
		session = NULL;
	else {
		rcu_assign_pointer(tunnel->sessions[session->session_id], NULL);
	}
	spin_unlock(&tunnels_lock);

	if (session) {
		UnregisterChannel(&session->ch);
		INIT_RCU_HEAD(&session->rcu_head);
		call_rcu(&session->rcu_head, l2tp_session_rcu_put);
	}
}
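/*
 * Every BIOC_* ioctl issued on an AF_L2TP socket is routed through
 * l2tp_join_bundle() below: the session named in the request is created on
 * first use (setup_session() registers channel "l2tp<tunnel>.<session>")
 * and the command is then forwarded to that channel.  One command handled
 * directly here is BIOC_L2TP_SET_TUNNEL_CSUM_PAYLOAD, which toggles UDP
 * checksumming of tunnel payloads.  A rough userspace sketch (struct
 * l2tp_join_bundle is defined in l2tp_linux.h; illustrative only):
 *
 *	struct l2tp_join_bundle j = {0};
 *	j.tunnel = local_tunnel_id;
 *	j.peer_tunnel = peer_tunnel_id;	// 0, or must match the tunnel's peer
 *	j.arg = 1;			// 1 = checksum payloads, 0 = don't
 *	ioctl(l2tp_fd, BIOC_L2TP_SET_TUNNEL_CSUM_PAYLOAD, &j);
 */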
static int l2tp_join_bundle(struct l2tp_info *l2tp, unsigned int cmd,
			    struct l2tp_join_bundle *j)
{
	struct l2tp_tunnel *tunnel;
	struct l2tp_session *session;
	int ret;

	pr_debug("l2tp_join_bundle\n");
	tunnel = find_get_l2tp_tunnel(j->tunnel);
#if 0
	if (!tunnel)
		tunnel = alloc_l2tp_tunnel(l2tp, j->tunnel);
#endif
	if (!tunnel || tunnel->l2tp != l2tp) {
		printk("where's the tunnel(%d)?\n", j->tunnel);
		if (tunnel)
			put_l2tp_tunnel(tunnel);
		return -EINVAL;
	}

	if (cmd == BIOC_L2TP_SET_TUNNEL_CSUM_PAYLOAD) {
		ret = -EINVAL;
		if (j->peer_tunnel && j->peer_tunnel != tunnel->peer_tunnel_id)
			goto out;
		if (j->session || j->peer_session)
			goto out;
		if (j->arg != (j->arg & 1))
			goto out;
		tunnel->csum_payload = j->arg;
		ret = 0;
		goto out;
	}

	if (!tunnel->peer_tunnel_id)
		tunnel->peer_tunnel_id = j->peer_tunnel;
	if (!tunnel->peer_tunnel_id) {
		printk("what's the tunnel(%d)'s peer???\n", j->tunnel);
		put_l2tp_tunnel(tunnel);
		return -EINVAL;
	}

	if (!j->session || !j->peer_session) {
		printk("session not set %d/%d\n", j->session, j->peer_session);
		put_l2tp_tunnel(tunnel);
		return -EINVAL;
	}

	session = tunnel->sessions[j->session];
	if (!session)
		session = setup_session(tunnel, j->session, j->peer_session);
	if (!session) {
		printk("where's the session(%d)?\n", j->session);
		put_l2tp_tunnel(tunnel);
		return -ENOMEM;
	}

	if (cmd == BIOC_L2TP_HARDWIRE_SESSION)
		ret = l2tp_bab_ioctl(&session->ch, cmd, j->arg);
	else
		ret = ch_ioctl(NULL, NULL, session->ch.link, cmd, j->arg);
out:
	put_l2tp_tunnel(tunnel);
	return ret;
}

static int l2tp_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct l2tp_info *l2tp = sk_l2tp_info(sk);
	struct l2tp_join_bundle tmp;

	if (copy_from_user(&tmp, (void *)arg, sizeof(tmp)))
		return -EFAULT;
	if (tmp.arg != (unsigned long)tmp.arg)
		return -EINVAL;
	return l2tp_join_bundle(l2tp, cmd, &tmp);
}

#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
static struct proto l2tp_proto = {
	.name	  = "l2tp",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct sock),
};
#endif

static struct proto_ops l2tp_proto_ops = {
	family:		PF_L2TP,
	mmap:		sock_no_mmap,
	sendpage:	sock_no_sendpage,
	poll:		datagram_poll,
	getname:	l2tp_getname,
	bind:		l2tp_bind,
	connect:	l2tp_connect,
	release:	l2tp_release,
	recvmsg:	l2tp_recvmsg,
	ioctl:		l2tp_ioctl,
#ifdef CONFIG_COMPAT
	compat_ioctl:	l2tp_ioctl,
#endif
};

/* l2tp_create
 *	prepare a new l2tp socket.
 */
static int l2tp_create(
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
		       struct net *net,
#endif
		       struct socket *sock, int protocol
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33) || (LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) && defined(RHEL_MAJOR))
		       , int kern
#endif
		      )
{
	struct sock *sk;

	if (protocol)
		return -EINVAL;

	sk = my_sk_alloc(AF_L2TP, GFP_KERNEL, 1, NULL, &l2tp_proto);
	if (unlikely(!sk))
		return -ENOBUFS;

	sk->sk_sndbuf = 262144;
	sk->sk_rcvbuf = 262144;
	sk->sk_protinfo = alloc_l2tp_info(sk);
	if (!sk->sk_protinfo) {
		sk_free(sk);
		return -ENOBUFS;
	}

	sock_init_data(sock, sk);
	sock->ops = &l2tp_proto_ops;
	pr_debug("l2tp_create: good\n");
	return 0;
}

static struct net_proto_family l2tp_family_ops = {
	family:	PF_L2TP,
	create:	l2tp_create,
};
/* module setup is easy -- just register our protocol family and let er rip. */
static int __init l2tp_init(void)
{
	int ret;

	session_cachep = kmem_cache_create("l2tp_session",
					   sizeof(struct l2tp_session), 0, 0, NULL);
	if (!session_cachep) {
		printk(KERN_ERR "l2tp: can't create l2tp_session slab\n");
		return -ENOMEM;
	}

	l2tp_info_cachep = kmem_cache_create("l2tp_info",
					     sizeof(struct l2tp_info), 0, 0, NULL);
	if (!l2tp_info_cachep) {
		printk(KERN_ERR "l2tp: can't create session l2tp_info slab\n");
		kmem_cache_destroy(session_cachep);
		return -ENOMEM;
	}

	ret = sock_register(&l2tp_family_ops);
	if (ret) {
		printk(KERN_ERR "l2tp: can't register socket family\n");
		kmem_cache_destroy(l2tp_info_cachep);
		kmem_cache_destroy(session_cachep);
		return ret;
	}

	l2tp_wq = create_workqueue("l2tp");
	if (!l2tp_wq) {
		printk(KERN_ERR "unable to create workqueue for l2tp\n");
		sock_unregister(AF_L2TP);
		kmem_cache_destroy(l2tp_info_cachep);
		kmem_cache_destroy(session_cachep);
		return -ENOMEM;
	}

	printk(KERN_NOTICE "l2tp is loaded (build %d)\n", (int)L2TP_BUILD);
	return 0;
}

/* cleanup is easy too: unregister the protocol family and make sure all
 * our data structures are freed.
 */
static void l2tp_cleanup(void)
{
	destroy_workqueue(l2tp_wq);
	sock_unregister(AF_L2TP);
	kmem_cache_destroy(l2tp_info_cachep);
	kmem_cache_destroy(session_cachep);
	/* FIXME: assert that all structures are gone */
	printk(KERN_NOTICE "no more l2tp (build %d) (num_tunnels %d num_infos %d)\n",
	       (int)L2TP_BUILD, num_tunnels, num_infos);
}

module_init(l2tp_init);
module_exit(l2tp_cleanup);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Benjamin LaHaise ");
MODULE_DESCRIPTION("L2TP/Babylon");