/* * babylon.c - Babylon Core Kernel module * Copyright (C) 1997-2000 SpellCaster Telecommunications Inc. * $Id: kern.c,v 1.13 2004/08/24 01:46:14 bcrl Exp $ * Released under the GNU Public License. See LICENSE file for details. */ #include "../include/bab_module.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26) #include #endif #include "../include/vercomp.h" #include "../include/aps_if.h" #include "bab.h" #ifndef VER #define VER "test" #endif #if LINUX_VERSION_CODE > 0x20100 #include #endif #define pr_drop pr_debug #define BAB_MAJOR 60 #define PPP_PROTO_IP 0x21 #define PPP_PROTO_IPX 0x2b #define PPP_PROTO_VJ_C 0x2d #define PPP_PROTO_VJ_UN 0x2f #define PPP_PROTO_MP 0x3d #define PPP_PROTO_IPV6 0x57 #ifdef __mips__ #define B_MAX_DEV (192U) #else #define B_MAX_DEV (256*1024U) #endif #define B_REGISTER_DEV 64 #define CH_TX_Q_LEN 3 struct lock_class_key bundle_lock_key; struct lock_class_key chan_lock_key; static struct kmem_cache *chan_cachep; #define B_DEVS_REGISTERING ((void *)-1L) static struct bundle *b_devs[B_MAX_DEV]; static DEFINE_SPINLOCK(b_devs_lock); static channel_t *channels[B_MAX_DEV]; static struct chan *calls[B_MAX_DEV]; static unsigned callid; /* protects allocating channeli from channels[] */ static DEFINE_SPINLOCK(channels_lock); static struct sk_buff_head rx_q; static void put_call(struct chan *call); static void leave_bundle(struct chan *ch); static void demux_pkt(struct bundle *b, struct sk_buff *skb); static void __b_xmit(struct bundle *b, struct sk_buff *skb, u16 proto); static int bdev_ioctl(struct bundle *b, unsigned int cmd, unsigned long arg); static void kick_tx(struct chan *call); static int ch_Output(struct chan *ch, struct sk_buff *skb) { int ret; ch->activity_jiffies = jiffies; ret = ch->ch->Output(ch->ch, skb); return ret; } static void xor_to_buf(u8 *to, u8 *from, int size) { while (size >= sizeof(long)) { *(long *)to ^= *(long *)from; to += sizeof(long); from += sizeof(long); size -= sizeof(long); } while (size-- > 0) *to++ ^= *from++; } static struct sk_buff *make_frag(struct bundle *b, int first) { struct sk_buff *frag_skb, *skb; int left = b->frag_split - b->frag_num; unsigned char *p; int bytes; int rain_frag = 0; if (left < 1) { pr_debug("left: %d\n", left); if (b->rflags & BF_RAIN) rain_frag = 1; else BUG(); pr_debug("rain_frag: %d\n", rain_frag); } frag_skb = b->frag_skb; if (rain_frag) { unsigned size = 0; int i; for (i=0; ifrag_num; i++) if (b->frag_lens[i] > size) size = b->frag_lens[i]; bytes = size; pr_debug("rain bytes: %d i=%d\n", bytes, i); } else bytes = (frag_skb->len - b->frag_offset + left - 1) / left; b->frag_lens[b->frag_num] = bytes; skb = dev_alloc_skb(bytes + (rain_frag ? 2 : 0)); if (!skb) goto out; p = skb_push(skb, 1 + (!(b->rflags & BF_PFC) ? 1 : 0) + ((b->rflags & BF_SSN) ? 2 : 4)); if (!(b->rflags & BF_PFC)) *p++ = 0; *p++ = PPP_PROTO_MP; if (b->rflags & BF_SSN) { u16 val = (b->frag_seq | ((left == 1) ? 0x4000 : 0) | (first ? 0x8000 : 0)); if (rain_frag) val |= 0x2000; /* I don't cast to u16 here as we're unaligned here */ *p++ = (val >> 8); *p++ = val; b->frag_seq = (b->frag_seq + 1) & 0xfff; } else { u32 val = (b->frag_seq | ((left == 1) ? 0x40000000 : 0) | (first ? 0x80000000 : 0)); if (rain_frag) val |= 0x2000000; *p++ = (val >> 24); *p++ = (val >> 16); *p++ = (val >> 8); *p++ = val; b->frag_seq = (b->frag_seq + 1) & 0xffffff; } #if LINUX_VERSION_CODE < 0x20100 skb->free = FREE_READ; #endif /* this is an overly paranoid check. it shouldn't happen, * but i'd rather not reboot if there's a bug somewhere. */ if (bytes < 0) goto out_doh; if (!rain_frag) memcpy(skb_put(skb, bytes), frag_skb->data + b->frag_offset, bytes); else { char *data = skb_put(skb, bytes + 2); unsigned frag_offset = 0; int i; pr_debug("data = %p, p = %p\n", data, p); *data++ = ((b->frag_num << 4) & 0xf0) | ((frag_skb->len >> 8) & 0xf); *data++ = frag_skb->len & 0xff; memset(data, 0, bytes); for (i=0; ifrag_num; i++) { unsigned cur = b->frag_lens[i]; if (bytes < cur) cur = bytes; xor_to_buf(data, frag_skb->data + frag_offset, cur); frag_offset += b->frag_lens[i]; } } pr_debug("made frag len=%d off=%d num=%d seq=%08x\n", skb->len, b->frag_offset, b->frag_num, b->frag_seq); b->frag_offset += bytes; b->frag_num++; if (((1 == left) && !(b->rflags & BF_RAIN)) || (0 == left)) { pr_debug("freed frag_skb len=%d left=%d\n", b->frag_skb->len, left); b_dev_kfree_skb(b->frag_skb); b->frag_skb = NULL; } /* The code path for the first packet already does ACFC for us. */ if (!first && !(b->rflags & BF_ACFC)) { p = skb_push(skb, 2); *p++ = 0xff; *p++ = 0x03; } pr_debug("make_frag: len=%d, data=\n" " %02x %02x %02x %02x %02x %02x %02x %02x\n" " %02x %02x %02x %02x %02x %02x %02x %02x\n" , skb->len, skb->data[ 0], skb->data[ 1], skb->data[ 2], skb->data[ 3], skb->data[ 4], skb->data[ 5], skb->data[ 6], skb->data[ 7], skb->data[ 8], skb->data[ 9], skb->data[10], skb->data[11], skb->data[12], skb->data[13], skb->data[14], skb->data[15] ); return skb; out_doh: printk(KERN_ALERT "%s: oh no! bytes<0!!!\n", b->name); b_dev_kfree_skb(skb); out: /* out of memory. damn. bump the sequence number (so the other end can detect a fragment loss, * and drop the fragment to free up some memory. */ b_dev_kfree_skb(b->frag_skb); b->frag_skb = NULL; b->frag_seq++; b->frag_seq &= (b->rflags & BF_SSN) ? 0xfff : 0xffffff; return NULL; } static void kick_tx(struct chan *call) { struct sk_buff *skb; if (test_busy(call->ch)) { pr_debug("%s: kick_tx: busy\n", call->ch->device_name); return; } /* First things first: attempt to transmit any packets that are queued on the channel. */ while (!test_busy(call->ch) && (skb = skb_dequeue(&call->tx_q))) { pr_debug("%s: kick_tx: outputting\n", call->ch->device_name); if (!ch_Output(call, skb)) { pr_debug("%s: kick_tx: good tx\n", call->ch->device_name); wake_up_interruptible(&call->tx_wait); wake_up_interruptible(&call->wait); } else { pr_debug("%s: kick_tx: output failed\n", call->ch->device_name); skb_queue_head(&call->tx_q, skb); return; } } /* now we send out any multilink packets that are queued for the bundle */ if (!test_busy(call->ch) && call->bundle) { struct bundle *b = call->bundle; spin_lock(&b->bundle_lock); if (b->tx_skb && !ch_Output(call, b->tx_skb)) { b->bdev->trans_start = jiffies; b->tx_skb = NULL; if (!b->frag_skb || !(b->tx_skb = make_frag(b, 0))) netif_wake_queue(b->bdev); } spin_unlock(&b->bundle_lock); } else pr_debug("busy 2\n"); } static void __b_xmit(struct bundle *b, struct sk_buff *skb, u16 proto) { struct chan *ch; u8 *p; int i; if (proto <= 0xff && (b->rflags & BF_PFC) && (proto & 0x01)) p = skb_push(skb, 1); else { p = skb_push(skb, 2); *p++ = proto >> 8; } *p++ = proto; pr_debug("b_net_xmit: len=%d, data=%02x %02x %02x %02x %02x %02x %02x %02x\n", skb->len, skb->data[0], skb->data[1], skb->data[2], skb->data[3], skb->data[4], skb->data[5], skb->data[6], skb->data[7] ); /* HACK: we do this before dropping the packet so the daemon * can determine if someone's trying to transmit. -ben */ b->tx_bytes += skb->len; ch = b->chan; if (!ch) goto drop; b->stats.tx_packets ++; #if LINUX_VERSION_CODE >= 0x20100 b->stats.tx_bytes += skb->len; #endif /* Note that the way this is arranged causes all channels with * multilink enabled to put multilink headers on all packets * when more than one channel is in the bundle. This behaviour * is REQUIRED to work around a bug in certain routers: they * ignore IP packets w/o an ML header. It's also required for * ordering purposes when using compression. -ben */ if ((b->rflags & BF_PASS_ML) && ((b->num_chan > 1) || (skb->len > b->chan->mtu))) { int num_idle = 0; /* okay, we're going to make a fragment */ b->frag_offset = 0; b->frag_num = 0; b->frag_split = 0; for (i=0; inum_chan; i++, ch = ch->next) { if (!test_busy(ch->ch)) num_idle ++; } ch = b->chan; if (num_idle < 1) num_idle = b->num_chan; /* try to ensure that the smallest packet sent out is at * least 40 bytes. This helps thruput on bundles with a * large number of channels. */ while (num_idle > 1 && (skb->len / num_idle) < 40) num_idle --; if (num_idle < b->min_frags) num_idle = b->min_frags; if (num_idle > b->max_frags) num_idle = b->max_frags; while ((skb->len / num_idle) > ch->mtu) num_idle++; b->frag_split = num_idle; b->frag_skb = skb; skb = make_frag(b, 1); if (!skb) { static int make_frag_warn; if (++make_frag_warn < 5) printk(KERN_DEBUG"%s: ugh -- make_frag failed\n", b->name); goto drop_nofree; } } if (!(b->rflags & BF_ACFC)) { /* not a fragment, so put ACFC on it if needed */ p = skb_push(skb, 2); *p++ = 0xff; *p++ = 0x03; } i = 0; while (i++ < b->num_chan) { struct chan *next = ch->next; if (test_busy(ch->ch) || ch_Output(ch, skb)) { ch = next; continue; } ch = next; b->chan = next; i = 0; /* normal fast path -- we've still got fragments, and are successfully making the pieces to transmit */ if (b->frag_skb && (skb = make_frag(b, 0))) continue; b->bdev->trans_start = jiffies; pr_debug("b_net_xmit(end): len=%d, data=%02x %02x %02x %02x %02x %02x %02x %02x\n", skb->len, skb->data[0], skb->data[1], skb->data[2], skb->data[3], skb->data[4], skb->data[5], skb->data[6], skb->data[7] ); /* our work here is done. */ netif_wake_queue(b->bdev); return; } b->tx_skb = skb; return; drop: b_dev_kfree_skb(skb); drop_nofree: b->stats.tx_dropped++; netif_wake_queue(b->bdev); } static int b_net_xmit(struct sk_buff *skb, struct net_device *dev) { struct bundle *b = netdev_priv(dev); u16 proto; /* FIXME: make this zero copy where possible */ if (!pskb_may_pull(skb, skb->len)) return 0; #if LINUX_VERSION_CODE < 0x02032B if (test_and_set_bit(0, &dev->tbusy)) goto tx_busy; #else netif_stop_queue(dev); #endif /* pull off the header we threw on in b_hard_header */ skb_pull(skb, dev->hard_header_len); switch (ntohs(skb->protocol)) { case ETH_P_IP: proto = PPP_PROTO_IP; if (!(b->lflags & BF_PASS_IP)) goto drop; #if defined(CONFIG_SLHC_BUILTIN) || defined(CONFIG_SLHC_MODULE) { struct sk_buff *newskb; int newlen; u8 *cp; if (!(b->lflags & BF_VJC)) break; newskb = dev_alloc_skb(skb->len + 128); if (!newskb) goto drop; cp = skb->data; newlen = slhc_compress(b->slhc, cp, skb->len, newskb->data, &cp, 1); if (newlen <= 0) goto drop; if (cp == skb->data) { /* unchanged, so transmit as normal ip */ b_dev_kfree_skb(newskb); break; } if (newskb->data[0] & SL_TYPE_COMPRESSED_TCP) { //newskb->data[0] &= ~SL_TYPE_COMPRESSED_TCP; proto = PPP_PROTO_VJ_C; } else if (newskb->data[0] & SL_TYPE_UNCOMPRESSED_TCP) { newskb->data[0] = skb->data[0]; proto = PPP_PROTO_VJ_UN; } else { /* unchanged, so transmit as normal ip */ printk("wierd\n"); b_dev_kfree_skb(newskb); break; } newskb->dev = skb->dev; skb->dev = NULL; b_dev_kfree_skb(skb); skb = newskb; skb_put(skb, newlen); } #endif break; case ETH_P_IPV6: proto = PPP_PROTO_IPV6; if (!(b->lflags & BF_PASS_IPV6)) goto drop; break; case ETH_P_IPX: proto = PPP_PROTO_IPX; if (!(b->lflags & BF_PASS_IPX)) goto drop; break; default: pr_debug("%s: dropping unknown protocol 0x%x\n", b->name, skb->protocol); goto drop; } __b_xmit(b, skb, proto); netif_wake_queue(dev); return 0; drop: b_dev_kfree_skb(skb); b->stats.tx_dropped++; /* we didn't transmit, but got the lock, so we must kick if anything's queued */ netif_wake_queue(b->bdev); netif_wake_queue(dev); return 0; #if LINUX_VERSION_CODE < 0x02032B tx_busy: pr_debug("tx while busy\n"); return -EBUSY; #endif } #if LINUX_VERSION_CODE < 0x20100 static int b_rebuild_header(void *buf, struct net_device *dev, unsigned long daddr, struct sk_buff *skb) #else static int b_rebuild_header(struct sk_buff *skb) #endif { return 0; } static struct net_device_stats *b_net_getstats(struct net_device *dev) { struct bundle *b = netdev_priv(dev); return &b->stats; } static int b_net_open(struct net_device *dev) { netif_start_queue(dev); return 0; } static int b_net_stop(struct net_device *dev) { struct bundle *b = netdev_priv(dev); /* hmm, need to signal down event -- just drop all channels? */ netif_stop_queue(b->bdev); if (b->tx_skb) { b_dev_kfree_skb(b->tx_skb); b->tx_skb = NULL; } if (b->frag_skb) { b_dev_kfree_skb(b->frag_skb); b->frag_skb = NULL; } return 0; } #if LINUX_VERSION_CODE > 0x20617 #define HH_CONST const #else #define HH_CONST /**/ #endif /* we put our header onto the packet here */ static int b_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, HH_CONST void *daddr, HH_CONST void *saddr, unsigned len) { skb_push(skb, dev->hard_header_len); return dev->hard_header_len; } static int b_net_init(struct net_device *dev) { int i; dev->addr_len = 0; dev->tx_queue_len = 8; dev_init_buffers(dev); #if LINUX_VERSION_CODE < 0x20100 dev->family = AF_INET; dev->pa_brdaddr = 0xffffffff; dev->pa_mask = 0xffffffff; dev->pa_alen = 4; #endif #if LINUX_VERSION_CODE <= 0x20617 dev->hard_header = b_hard_header; dev->header_cache_update = NULL; dev->rebuild_header = b_rebuild_header; #else { static struct header_ops b_net_header_ops = { .create = b_hard_header, .rebuild = b_rebuild_header, }; dev->header_ops = &b_net_header_ops; } #endif dev->flags = IFF_POINTOPOINT | IFF_NOARP; dev->type = ARPHRD_PPP; dev->hard_header_len = 8+20+40; /* maximum length of header we will add in b_net_xmit */ dev->addr_len = ETH_ALEN; for (i=0; ibroadcast[i] = 0xff; #if !defined(HAVE_NET_DEVICE_OPS) dev->change_mtu = NULL; dev->get_stats = b_net_getstats; dev->open = b_net_open; dev->stop = b_net_stop; dev->hard_start_xmit = b_net_xmit; dev->do_ioctl = NULL; #endif return 0; } #if defined(HAVE_NET_DEVICE_OPS) static struct net_device_ops b_net_ops = { .ndo_init = b_net_init, .ndo_open = b_net_open, .ndo_stop = b_net_stop, .ndo_get_stats = b_net_getstats, .ndo_start_xmit = b_net_xmit, }; #endif static int last_dev_id; int alloc_bdev_id(int id, struct bundle *b) { int i; spin_lock(&channels_lock); if (id >= 0 && id < B_MAX_DEV) { if (!b_devs[id]) { b_devs[id] = b; spin_unlock(&channels_lock); return id; } } id = last_dev_id; for (i=0; i= 0) && (id < last_dev_id)) last_dev_id = id; } static void bundle_setup(struct net_device *dev) { struct bundle *b = netdev_priv(dev); b->bdev = dev; return; } static struct bundle *alloc_bundle(int i, struct net *net, const char *req_name) { struct bundle *b = NULL; struct net_device *dev; char name[32]; MOD_INC_USE_COUNT; i = alloc_bdev_id(i, B_DEVS_REGISTERING); if (i < 0) return NULL; if (!req_name) { sprintf(name, "aps%d", i); req_name = name; } dev = alloc_netdev(sizeof(struct bundle), req_name, bundle_setup); if (!dev) goto out; b = netdev_priv(dev); if ((i >= 0) && (i < B_MAX_DEV) && (b_devs[i] != B_DEVS_REGISTERING)) { free_netdev(dev); MOD_DEC_USE_COUNT; return b_devs[i]; } spin_lock_init(&b->bundle_lock); lockdep_set_class(&b->bundle_lock, &bundle_lock_key); #if defined(CONFIG_SLHC_BUILTIN) || defined(CONFIG_SLHC_MODULE) b->slhc = slhc_init(16, 16); if (!b->slhc) { printk("slhc_init failed\n"); free_netdev(dev); b_devs[i] = NULL; return NULL; } #endif if (b_devs[i] != B_DEVS_REGISTERING) goto out; #if defined(HAVE_NET_DEVICE_OPS) b->bdev->netdev_ops = &b_net_ops; #else b->bdev->init = b_net_init; #endif b->m_seq = 0xffffff; b->min_frags = 1; b->max_frags = ~0U; strncpy(b->name, req_name, sizeof(b->name)); b->name[sizeof(b->name) - 1] = 0; b->index = i; #if LINUX_VERSION_CODE < 0x02032B b->bdev->name = b->name; #else strncpy(b->bdev->name, b->name, sizeof(b->bdev->name)); b->bdev->name[sizeof(b->bdev->name) - 1] = 0; #endif #ifdef CONFIG_NET_NS #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26) dev_net_set(b->bdev, net ? net : current->nsproxy->net_ns); #elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) && (defined(CONFIG_NET_NS) || LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) b->bdev->nd_net = net ? net : current->nsproxy->net_ns; /* FIXME */ #endif #endif if (0 == register_netdev(b->bdev)) { pr_debug("alloc_bundle succeeded(%s @ %p)\n", b->name, b); b_devs[i] = b; return b; } free_bdev_id(i); WARN_ON(b_devs[i] != B_DEVS_REGISTERING); b_devs[i] = NULL; out: if (b) { #if defined(CONFIG_SLHC_BUILTIN) || defined(CONFIG_SLHC_MODULE) if (b->slhc) slhc_free(b->slhc); #endif free_netdev(dev); } MOD_DEC_USE_COUNT; return NULL; } static int free_bundle(struct bundle *b) { int id = b->index; if (b_devs[id] != b) { printk("free_bundle: mismatch %p vs %p\n", b_devs[id], b); WARN_ON(1); } else b_devs[id] = NULL; netif_stop_queue(b->bdev); while (b->chan) leave_bundle(b->chan); unregister_netdev(b->bdev); #if defined(CONFIG_SLHC_BUILTIN) || defined(CONFIG_SLHC_MODULE) slhc_free(b->slhc); #endif free_netdev(b->bdev); free_bdev_id(id); MOD_DEC_USE_COUNT; return 0; } /* returns true if left >= right, implemented by checking if left - right is not a negative 24 bit signed number */ static inline int seq_ge(u32 left, u32 right) { return !((left - right) & 0x00800000); } /* returns true if left <= right, implemented by checking if left - right is not a negative 24 bit signed number */ static inline int seq_le(u32 left, u32 right) { return !((right - left) & 0x00800000); } static inline int seq_lt(u32 left, u32 right) { return !seq_ge(left, right); } static inline int seq_eq(u32 left, u32 right) { return (left & 0x00ffffff) == (right & 0x00ffffff); } static inline struct sk_buff *find_seq(struct bundle *b, u32 seq) { struct sk_buff *skb; for (skb=b->frags[seq & FRAG_HASH_MASK]; skb && !seq_eq(seq, *(u32 *)skb->data); skb=skb->next) ; pr_debug("find_seq(%08x)=%p\n", seq, skb); return skb; } static struct sk_buff *yank_seq(struct bundle *b, u32 seq) { struct sk_buff **skb_p, *skb; for (skb_p=&b->frags[seq & FRAG_HASH_MASK]; *skb_p && !seq_eq(seq, *(u32 *)(*skb_p)->data); skb_p=&(*skb_p)->next) ; if (!*skb_p) printk(KERN_ALERT "yank_seq: %08x not found!\n", seq); skb = *skb_p; *skb_p = skb->next; skb_pull(skb, 4); b->num_frags--; skb->dev = NULL; return skb; } /**/ static int try_to_reassem_rain(struct bundle *b, u32 seq) { int i, found, missing = -1; int total_bytes, offset, reassem_bytes; int nfrags; struct sk_buff *xor_skb = find_seq(b, seq); struct sk_buff *reassem_skb; int missing_bytes; unsigned char *data; int cur_offset; nfrags = ntohs(*(u16 *)(xor_skb->data + 4)); reassem_bytes = nfrags & 0xfff; nfrags = nfrags >> 12; pr_debug("try_to_reassem_rain(%p, 0x%08x) nfrags=%d reassem_bytes=%d\n", b, seq, nfrags, reassem_bytes); for (i=found=offset=total_bytes=0; i < nfrags; i++) { u32 cur_seq = (seq + i - nfrags) & 0x00ffffff; struct sk_buff *skb = find_seq(b, cur_seq); pr_debug("i=%d, cur_seq=0x%08x, skb=%p\n", i, cur_seq, skb); if (skb) { found++; total_bytes += skb->len - 4; if (missing == -1) offset += skb->len - 4; } else if (missing == -1) missing = i; } pr_debug("missing=%d, found=%d, total_bytes=%d, offset=%d\n", missing, found, total_bytes, offset); if (found < (nfrags - 1)) { pr_debug("not enough frags... returning\n"); return 0; } xor_skb = yank_seq(b, seq); missing_bytes = reassem_bytes - total_bytes; if (missing_bytes <= 0) { printk("missing_bytes=%d missing=%d, found=%d, total_bytes=%d, offset=%d reassem_bytes=%d\n", missing_bytes, missing, found, total_bytes, offset, reassem_bytes); b->stats.rx_errors++; b_kfree_skb(xor_skb); return 0; } pr_debug("xor_skb = %p len = %d, missing_bytes = %d\n", xor_skb, xor_skb->len, missing_bytes); skb_pull(xor_skb, 2); /* u16 rain_hdr; */ reassem_skb = dev_alloc_skb(reassem_bytes); if (!reassem_skb) return 0; /* FIXME - should discard frags */ data = skb_put(reassem_skb, reassem_bytes); memcpy(data + offset, xor_skb->data, missing_bytes); xor_skb->dev = NULL; b_kfree_skb(xor_skb); for (i=cur_offset=0; i < nfrags; i++) { u32 cur_seq = (seq + i - nfrags) & 0x00ffffff; struct sk_buff *skb; int len; if (i == missing) { cur_offset += missing_bytes; continue; } skb = yank_seq(b, cur_seq); len = skb->len; pr_debug("i=%d, cur_seq=0x%08x, len=%d, cur_offset=%d\n", i, cur_seq, len, cur_offset); memcpy(data + cur_offset, skb->data, len); cur_offset += len; if (missing_bytes < len) len = missing_bytes; pr_debug("xor_to_buf:\n" " %02x %02x %02x %02x %02x %02x %02x %02x\n" " %02x %02x %02x %02x %02x %02x %02x %02x\n", skb->data[ 0], skb->data[ 1], skb->data[ 2], skb->data[ 3], skb->data[ 4], skb->data[ 5], skb->data[ 6], skb->data[ 7], skb->data[ 8], skb->data[ 9], skb->data[10], skb->data[11], skb->data[12], skb->data[13], skb->data[14], skb->data[15] ); xor_to_buf(data + offset, skb->data, len); skb->dev = NULL; b_kfree_skb(skb); } demux_pkt(b, reassem_skb); return 1; } /* Try to reassemble a fragment and be efficient about it. This code makes a couple of implicite assumptions, most notably that when it reads the sequence number out of a packet in the queue it just did a find_seq on, that sequence number will be identical. Otherwise, gaps in the queue may arise, leading to a NULL pointer deref in yank_seq. -ben */ static void try_to_reassem(struct bundle *b, u32 seq) { struct sk_buff *skb, *tmp; u32 b_seq, e_seq; unsigned len_needed = 0; WARN_ON(!spin_is_locked(&b->bundle_lock)); if ((b->lflags & BF_RAIN) && (seq & 0x20000000)) { try_rain: try_to_reassem_rain(b, seq); return; } if (b->lflags & BF_RAIN) { int i; for (i=0; i<11; i++) { /* FIXME */ u32 tmp_seq = (seq + i) & 0x00ffffff; skb = find_seq(b, tmp_seq); if (skb) { tmp_seq = *(u32 *)skb->data; if (tmp_seq & 0x20000000) { if (try_to_reassem_rain(b, tmp_seq)) return; } } } } e_seq = seq; if (e_seq & 0x80000000) e_seq++; do { if (!(skb = find_seq(b, e_seq))) goto out; e_seq = *(u32 *)skb->data; if ((b->lflags & BF_RAIN) && (seq & 0x20000000)) goto try_rain; if (e_seq & 0x80000000) goto bad_ml; len_needed += skb->len - 4; pr_debug("try_to_reassem:e found %08x\n", e_seq); } while (!(0x40000000 & e_seq++)); e_seq--; b_seq = seq; if (b_seq & 0x40000000) b_seq--; do { if (!(skb = find_seq(b, b_seq))) goto out; b_seq = *(u32 *)skb->data; if ((b->lflags & BF_RAIN) && (seq & 0x20000000)) goto try_rain; if (b_seq & 0x40000000) goto bad_ml; len_needed += skb->len - 4; pr_debug("try_to_reassem:b found %08x\n", b_seq); } while (!(0x80000000 & b_seq--)); b_seq++; //printk(KERN_DEBUG "%s: fragment complete from %08x -> %08x\n", b->name, b_seq, e_seq); /* we have a complete fragment */ skb = yank_seq(b, b_seq); if (skb_tailroom(skb) < (len_needed - skb->len)) { tmp = skb; skb = dev_alloc_skb(len_needed); if (!skb) { printk(KERN_WARNING "try_to_reassem: out of memory for %u byte packet\n", len_needed); goto out; } skb->dev = NULL; pr_debug("try_to_reassem: got new skb\n"); #if LINUX_VERSION_CODE < 0x20100 skb->free = 1; #endif goto copy; } b_seq++; /* we now walk the frament from start+1 -> end, putting them into the new buffer. */ do { pr_debug("try_to_reassem: getting %08x\n", b_seq); tmp = yank_seq(b, b_seq); copy: memcpy(skb_put(skb, tmp->len), tmp->data, tmp->len); b_kfree_skb(tmp); b_seq ++; } while (seq_le(b_seq, e_seq)) ; demux_pkt(b, skb); out: return; bad_ml: printk(KERN_WARNING "%s/%s: bad multilink sequence %08x discarded\n", b->name, ((struct chan *)skb->dev)->ch->device_name, *(u32 *)skb->data); skb = yank_seq(b, *(u32 *)skb->data); b_kfree_skb(skb); } static void discard_frags_le(struct bundle *b, u32 seq) { unsigned i; if (!b->num_frags) return; for (i=0; ifrags[i]; *skb_p; ) { struct sk_buff *skb = *skb_p; if (seq_le(*(u32 *)skb->data, seq)) { b->stats.rx_dropped++; *skb_p = skb->next; skb->next = NULL; skb->dev = NULL; b_kfree_skb(skb); b->num_frags--; } else skb_p = &skb->next; } } } static void discard_all_frags(struct bundle *b) { unsigned i; WARN_ON(!spin_is_locked(&b->bundle_lock)); if (!b->num_frags) return; for (i=0; ifrags[i]; skb; skb=b->frags[i]) { b->stats.rx_dropped++; b->frags[i] = skb->next; skb->next = NULL; skb->dev = NULL; b_kfree_skb(skb); } } b->num_frags = 0; } static void recompute_m(struct bundle *b) { struct chan *ch = b->chan; int valid = 0; do { if (ch->m_valid && (!valid || seq_lt(ch->m_seq, b->m_seq))) { valid = 1; b->m_seq = ch->m_seq; } ch = ch->next; } while (ch != b->chan); if (b->lflags & BF_RAIN) return; if (valid) { u32 seq = b->m_seq; struct sk_buff *skb; while ((skb = find_seq(b, seq))) { if (0x40000000 & *(u32 *)skb->data) { discard_frags_le(b, seq); return; } seq--; if (0x80000000 & *(u32 *)skb->data) { discard_frags_le(b, seq); return; } } discard_frags_le(b, b->m_seq); } else discard_all_frags(b); } /* mp_reassem * Must be called from bh context. */ static void mp_reassem(struct bundle *b, struct sk_buff *skb) { struct chan *ch = (struct chan *)skb->dev; struct sk_buff *oskb; int redo_m = 0; u32 seq; /* FIXME: make this zero copy where possible */ if (!pskb_may_pull(skb, skb->len)) return; spin_lock(&b->bundle_lock); if (BF_SSN & b->lflags) { /* convert 12 bit short sequence number into 24 bit sequence number */ seq = ntohs(*(u16 *)skb->data); seq = ((seq & 0xf000) << 16) | (seq & 0x0fff); if (!(seq & 0x0800) && (b->m_seq & 0x0800)) seq |= (b->m_seq + 0x00001000) & 0x00fff000; else seq |= b->m_seq & 0x00fff000; pr_debug("%s: packet with short seq %04x -> %08x\n", ch->ch->device_name, ntohs(*(u16 *)skb->data), seq); skb_push(skb, 2); /* get extra space for internal storage of the sequence */ } else { seq = ntohl(*(u32 *)skb->data); pr_debug("%s: packet with long seq %08x\n", ch->ch->device_name, seq); } /* do we need to recompute m? */ redo_m = (!ch->m_valid || ch->m_seq == b->m_seq); ch->m_seq = seq; ch->m_valid = 1; if ((oskb=find_seq(b, seq))) goto discard; if ((seq & 0xc0000000) == 0xc0000000) { skb_pull(skb, 4); skb->dev = NULL; demux_pkt(b, skb); goto out; } pr_debug("mp_reassem: adding skb=%p seq=%08x\n", skb, seq); *(u32 *)skb->data = seq; skb->prev = NULL; if (BF_RAIN & b->lflags) { /* FIXME: RAIN mode has to disable the intelligent fragment * dropping code at present due to the fact that packets don't * end with just the MLPPP end flag set. */ skb->next = NULL; if (b->frags[seq & FRAG_HASH_MASK]) { b->num_frags--; b->frags[seq & FRAG_HASH_MASK]->dev = NULL; b_kfree_skb(b->frags[seq & FRAG_HASH_MASK]); } } else skb->next = b->frags[seq & FRAG_HASH_MASK]; b->frags[seq & FRAG_HASH_MASK] = skb; b->num_frags++; try_to_reassem(b, seq); out: if (redo_m) recompute_m(b); spin_unlock(&b->bundle_lock); return; discard: printk(KERN_WARNING "%s: duplicate sequence number %08x ssn=%c\n", ch->ch->device_name, seq, (BF_SSN & b->lflags) ? 'y' : 'n'); ch->ch->stats.rx_errors++; skb->dev = NULL; b_kfree_skb(skb); /* it's best to be safe here and toss both, as otherwise the packet is likely to be bogus */ oskb = yank_seq(b, seq); b_kfree_skb(oskb); if (redo_m) recompute_m(b); spin_unlock(&b->bundle_lock); } static inline int pull_protocol(struct bundle *b, struct sk_buff *skb) { int ret = -1; int protocol; if (htons(0xff03) == *(u16 *)skb->data) { if (!skb_pull(skb, 2)) { pr_drop("%s: skb_pull(2 -- ACF) failed\n", b->name); goto drop; } } protocol = *(u8 *)skb->data; if (!skb_pull(skb, 1)) { pr_drop("%s: skb_pull(1 -- proto a) failed\n", b->name); goto drop; } if (!(0x01 & protocol)) { protocol <<= 8; protocol |= *(u8 *)skb->data; if (!skb_pull(skb, 1)) { pr_drop("%s: skb_pull(1 -- proto b) failed\n", b->name); goto drop; } } ret = protocol; drop: return ret; } static void bab_ReInput(struct chan *dev, struct sk_buff *skb) { if (dev && dev->ch && dev->ch->ReInput) { dev->ch->ReInput(dev->ch, skb); return; } if (dev && skb_queue_len(&dev->rx_q) < 32) { skb_queue_tail(&dev->rx_q, skb); wake_up_interruptible(&dev->rx_wait); wake_up_interruptible(&dev->wait); } else { if (dev) dev->ch->stats.rx_dropped++; b_kfree_skb(skb); } } static void demux_pkt(struct bundle *b, struct sk_buff *skb) { struct chan *ch; int prot; u8 *p; ch = (struct chan *)skb->dev; /* FIXME: hack: make sure we have the first few bytes of headers * 8 bytes is enough for ACFC(2) + proto (2) + MLPPP (4) */ if (!pskb_may_pull(skb, 8)) { b->stats.rx_errors++; if (ch) { ch->ch->stats.rx_dropped++; } return; } pr_debug("demux_pkt\n"); prot = pull_protocol(b, skb); if (prot < 0) goto drop; skb->protocol = prot; pr_debug("rx_pkt: protocol = %04x\n", skb->protocol); if (b && skb->protocol < 0xc000) { skb_reset_mac_header(skb); switch (skb->protocol) { case PPP_PROTO_MP: if (skb->dev) { /* skb->dev is NULL for mp reassembled packets */ if (!(b->lflags & BF_PASS_ML)) { pr_drop("%s: MP: MP not enabled\n", b->name); goto drop; } mp_reassem(b, skb); return; } printk(KERN_ERR "%s: multilink header in multilink packet???? Dropping.\n", b->name); goto out; #if defined(CONFIG_SLHC_BUILTIN) || defined(CONFIG_SLHC_MODULE) case PPP_PROTO_VJ_C: { int newlen; if (!(b->lflags & BF_VJC)) { pr_drop("%s: VJ_C: VJC not enabled\n", b->name); goto drop; } newlen = slhc_uncompress(b->slhc, skb->data, skb->len); if (newlen <= 0) { pr_drop("slhc err on %d\n", skb->len); goto drop; } if (newlen > skb->len) skb_put(skb, newlen - skb->len); else if (newlen < skb->len) skb_trim(skb, newlen); if (!(b->lflags & BF_PASS_IP)) goto drop; skb->protocol = htons(ETH_P_IP); break; } case PPP_PROTO_VJ_UN: if (!(b->lflags & BF_VJC)) { pr_drop("%s: VJ_UN: VJC not enabled\n", b->name); goto drop; } if (slhc_remember(b->slhc, skb->data, skb->len) <= 0) { pr_drop("%s: slhc_remember failed\n", b->name); goto drop; } #endif case PPP_PROTO_IP: if (!(b->lflags & BF_PASS_IP)) { pr_drop("%s: dropping ip packet\n", b->name); goto drop; } skb->protocol = htons(ETH_P_IP); break; case PPP_PROTO_IPV6: if (!(b->lflags & BF_PASS_IPV6)) { pr_drop("%s: dropping ipv6 packet\n", b->name); goto drop; } skb->protocol = htons(ETH_P_IPV6); break; case PPP_PROTO_IPX: if (!(b->lflags & BF_PASS_IPX)) { pr_drop("%s: dropping ipx packet\n", b->name); goto drop; } skb->protocol = htons(ETH_P_IPX); break; default: goto out; } pr_debug("rx_pkt: calling netif_rx len=%d, data=%02x %02x %02x %02x\n", skb->len, skb->data[0], skb->data[1], skb->data[2], skb->data[3]); b->rx_bytes += skb->len; b->stats.rx_packets ++; #if LINUX_VERSION_CODE >= 0x20100 b->stats.rx_bytes += skb->len; #endif skb->dev = b->bdev; netif_rx(skb); return; } out: p = skb_push(skb, 2); *p++ = skb->protocol >> 8; *p++ = skb->protocol; if (skb->dev) { ch = (struct chan *)skb->dev; skb->dev = NULL; } else if (b) { /* put a dummy multilink header on the packet for Babylon. */ ch = b->chan; if (BF_SSN & b->lflags) { p = skb_push(skb, 4); p[2] = 0xc0 | ((b->dummy_seq >> 8) & 0x0f); p[3] = b->dummy_seq; b->dummy_seq = (b->dummy_seq + 1) & 0x0fff; } else { p = skb_push(skb, 6); p[2] = 0xc0; p[3] = b->dummy_seq >> 16; p[4] = b->dummy_seq >> 8; p[5] = b->dummy_seq; b->dummy_seq = (b->dummy_seq + 1) & 0x00ffffff; } p[0] = PPP_PROTO_MP >> 8; p[1] = PPP_PROTO_MP; if (!(b->lflags & BF_ACFC)) { p = skb_push(skb, 2); *p++ = 0xff; *p++ = 0x03; } } else ch = NULL; bab_ReInput(ch, skb); return; drop: b->stats.rx_errors++; skb->dev = NULL; b_kfree_skb(skb); } static inline void rx_skb(struct sk_buff *skb) { struct chan *ch = (struct chan *)skb->dev; pr_debug("input_bh: len=%d, data=\n" " %02x %02x %02x %02x %02x %02x %02x %02x\n" " %02x %02x %02x %02x %02x %02x %02x %02x\n" , skb->len, skb->data[ 0], skb->data[ 1], skb->data[ 2], skb->data[ 3], skb->data[ 4], skb->data[ 5], skb->data[ 6], skb->data[ 7], skb->data[ 8], skb->data[ 9], skb->data[10], skb->data[11], skb->data[12], skb->data[13], skb->data[14], skb->data[15] ); ch->activity_jiffies = jiffies; if (ch->multihop_other) { int ret, len; skb->dev = NULL; if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) goto dropped; len = skb->len; ret = ch_Output(ch->multihop_other, skb); pr_debug("ch->multihop_other(%p) ret = %d\n", ch->multihop_other, ret); if (ret) { b_kfree_skb(skb); dropped: ch->ch->stats.rx_dropped++; } else { ch->ch->stats.rx_packets ++; #if LINUX_VERSION_CODE >= 0x20100 ch->ch->stats.rx_bytes += len; #endif } return; } ch->ch->stats.rx_packets ++; ch->ch->stats.rx_bytes += skb->len; if ((ch->lflags & BF_PPP) && ch->bundle) { demux_pkt(ch->bundle, skb); return; } pr_debug("input_bh(%s, %p): ppp !active, putting in channel queue\n", ch->ch->device_name, skb); skb->dev = NULL; bab_ReInput(ch, skb); } static int setup_call(channel_t *ch) { struct chan *call; int i; call = kmem_cache_alloc(chan_cachep, GFP_ATOMIC); if (!call) { pr_debug("Erk\n"); return -ENOMEM; } memset(call, 0, sizeof(*call)); spin_lock_init(&call->chan_lock); lockdep_set_class(&call->chan_lock, &chan_lock_key); skb_queue_head_init(&call->rx_q); skb_queue_head_init(&call->tx_q); init_waitqueue_head(&call->rx_wait); init_waitqueue_head(&call->tx_wait); init_waitqueue_head(&call->wait); call->callid = -1; call->mtu = 1500; call->net = NULL; call->idx = -1; call->activity_jiffies = jiffies; spin_lock(&channels_lock); for (i=0; icallid = callid; callid = (callid + 1) % B_MAX_DEV; break; } callid = (callid + 1) % B_MAX_DEV; } spin_unlock(&channels_lock); if (call->callid == -1) printk("no callid\n"); pr_debug("%s: call->callid = %d [%d]\n", ch->device_name, call->callid, callid); call->ch = ch; call->use_count = 1; /* use count of 1 is the channel_t->link */ MOD_INC_USE_COUNT; ch->link = call; return 0; } struct chan *babylon_alloc_call(channel_t *ch, int *retp) { struct chan *call = NULL; *retp = -ENODEV; local_irq_disable(); /* FIXME: race */ spin_lock(&channels_lock); ch = channels[ch->channels_index]; spin_unlock(&channels_lock); if (!ch) goto out; if (!ch->link) setup_call(ch); call = ch->link; if (call) { call->idx = ch->index; } else *retp = -EAGAIN; out: local_irq_enable(); return call; } EXPORT_SYMBOL_GPL(babylon_alloc_call); static struct chan *get_call(unsigned idx, int *retp) { channel_t *ch; *retp = -ENODEV; if (idx >= B_MAX_DEV) return NULL; spin_lock(&channels_lock); ch = channels[idx]; spin_unlock(&channels_lock); return babylon_alloc_call(ch, retp); } /* release_allocd_bundle * When a call is one of many in a multilink bundle, this function passes the * allocd_bundle on to one of the remaining multilink calls. This allows the * interface to continue existing so long as there * channels part of the link. */ static void release_allocd_bundle(struct chan *dev) { pr_debug("release_allocd_bundle(%s)\n", dev->ch->device_name); if (dev->next && dev->next != dev) { if (dev->next->allocd_bundle) BUG(); dev->next->allocd_bundle = dev->allocd_bundle; dev->allocd_bundle = NULL; } } /* join_bundle * Assumes bh is disabled for bundle_lock. */ static int join_bundle(struct chan *ch, struct bundle *b) { int ret; pr_debug("join_bundle(%s, %s)\n", ch->ch->device_name, b->name); WARN_ON(!spin_is_locked(&ch->chan_lock)); spin_lock(&b->bundle_lock); ret = -EBUSY; if (ch->bundle) goto out; ret = -EIO; if (CS_CONNECTED != ch->ch->state) goto out; ret = 0; ch->bundle = b; if (b->chan) { ch->next = b->chan->next; b->chan->next = ch; } else { ch->next = ch; b->chan = ch; } b->num_chan++; spin_unlock(&b->bundle_lock); kick_tx(ch); return 0; out: spin_unlock(&b->bundle_lock); return ret; } static void leave_bundle(struct chan *ch) { struct bundle *b; struct chan **prevp; pr_debug("leave_bundle(%s)\n", ch->ch->device_name); WARN_ON(!spin_is_locked(&ch->chan_lock)); if (!(b=ch->bundle)) goto out; WARN_ON(!spin_is_locked(&b->bundle_lock)); if (ch->allocd_bundle) release_allocd_bundle(ch); netif_tx_lock(b->bdev); /* Quiesce transmit */ prevp = &b->chan->next; do { if (*prevp == ch) break; prevp = &(*prevp)->next; } while (prevp != &b->chan->next); if (*prevp == ch) *prevp = ch->next; if (b->chan == ch) b->chan = (ch->next == ch) ? NULL : ch->next; ch->next = NULL; ch->bundle = NULL; b->num_chan--; if (!b->chan) { /* bundle is now idle, need to tidy up for next use */ discard_all_frags(b); b->m_seq = 0xffffff; } netif_tx_unlock(b->bdev); out: return; } static void call_free_rcu(struct rcu_head *head) { struct chan *call = container_of(head, struct chan, rcu_head); kmem_cache_free(chan_cachep, call); MOD_DEC_USE_COUNT; } /* put_call * - Call from process context only! */ static void put_call(struct chan *call) { struct bundle *bundle = NULL; pr_debug("put_call(%p)/%d ch(%p/%s)\n", call, call->use_count, call->ch, call->ch ? call->ch->device_name : ""); WARN_ON(in_irq()); WARN_ON(in_softirq()); WARN_ON(in_interrupt()); BUG_ON(call->use_count <= 0); spin_lock_bh(&call->chan_lock); if (!--call->use_count) { struct sk_buff *skb; channel_t *ch; ch = call->ch; if (ch) { call->final_stats = ch->stats; call->ch = NULL; ch->link = NULL; /* only do the hangup after dropping the call, as hangup will cause Down to be called * and we can't let it see the call in a half-closed state. */ if (call->user_active) { if (ch->Hangup(ch)) { printk(KERN_DEBUG "bab: can't abort call -- memory leak!\n"); goto out; } call->user_active = 0; } if (ch->Release) ch->Release(ch); } if (call->bundle) { struct bundle *b = call->bundle; spin_lock(&b->bundle_lock); leave_bundle(call); spin_unlock(&b->bundle_lock); } while (NULL != (skb = skb_dequeue(&call->rx_q))) b_kfree_skb(skb); while (NULL != (skb = skb_dequeue(&call->tx_q))) b_dev_kfree_skb(skb); spin_unlock_bh(&call->chan_lock); spin_lock(&channels_lock); if (call->callid >= 0 && call->callid < B_MAX_DEV && calls[call->callid] == call) { calls[call->callid] = NULL; if (call->callid < callid) callid = call->callid; } bundle = call->allocd_bundle; call->allocd_bundle = NULL; if (call->multihop_other) { pr_debug("multihop teardown(%p, %p)\n", call, call->multihop_other); if (call->multihop_other->multihop_other != call) BUG(); call->multihop_other->multihop_other = NULL; call->multihop_other = NULL; } spin_unlock(&channels_lock); if (call->net) put_net(call->net); call->net = NULL; call_rcu(&call->rcu_head, call_free_rcu); out: ; } else spin_unlock_bh(&call->chan_lock); if (bundle) free_bundle(bundle); } /* * forcibly drops a call */ static void drop_call(struct chan *call) { struct bundle *b; channel_t *ch; spin_lock_bh(&call->chan_lock); pr_debug("drop_call(%p)\n", call); BUG_ON(!call->ch); b = call->bundle; if (b) { spin_lock(&b->bundle_lock); leave_bundle(call); spin_unlock(&b->bundle_lock); } ch = call->ch; call->user_active = 0; call->ch->link = NULL; call->ch = NULL; call->final_stats = ch->stats; wake_up_interruptible(&call->rx_wait); wake_up_interruptible(&call->tx_wait); wake_up_interruptible(&call->wait); spin_unlock_bh(&call->chan_lock); /* FIXME */ put_call(call); if (ch && ch->Release) ch->Release(ch); } static int b_dial(unsigned idx, struct file *filp, const char *number) { DECLARE_WAITQUEUE(wait, current); struct chan *dev; int ret = 0; pr_debug("b_dial(%d, %s)\n", idx, number); local_irq_disable(); dev = filp->private_data; if (!dev || !dev->ch || !dev->ch->use_existing_call) { dev = get_call(idx, &ret); if (!dev) { local_irq_enable(); return ret; } } if (dev->user_active || dev->dialing) { local_irq_enable(); put_call(dev); return -EBUSY; } if (filp->private_data && filp->private_data != dev) put_call(filp->private_data); filp->private_data = dev; dev->dialing = 1; dev->user_status = 0; dev->user_active = 1; if (!(O_NONBLOCK & filp->f_flags)) { current->state = TASK_INTERRUPTIBLE; add_wait_queue(&dev->wait, &wait); } ret = dev->ch->Connect(dev->ch, number, dev->ch->callType); if (ret) dev->user_active = 0; local_irq_enable(); if (!(O_NONBLOCK & filp->f_flags)) { if (!ret) { schedule(); if (signal_pending(current)) ret = -ERESTARTSYS; else ret = dev->user_status; } current->state = TASK_RUNNING; remove_wait_queue(&dev->wait, &wait); } return ret; } int ch_JoinBundle(struct chan *dev, unsigned long arg) { struct bundle *b; channel_t *ch; int ret; if (!dev) return -EPIPE; spin_lock(&b_devs_lock); spin_lock_bh(&dev->chan_lock); ch = dev->ch; ret = -EPIPE; if (!ch) goto out; ret = -EBUSY; if (dev->bundle) goto out; if (dev->multihop_other) goto out; ret = -ENOENT; if (arg >= B_MAX_DEV) goto out; b = ACCESS_ONCE(b_devs[arg]); if (!b || (b == B_DEVS_REGISTERING)) goto out; ret = join_bundle(dev, b); out: spin_unlock_bh(&dev->chan_lock); spin_unlock(&b_devs_lock); return ret; } /* setup_multihop * Called from process context (via ioctl). */ int setup_multihop(struct chan *dev, unsigned long callid) { struct chan *other; int ret; spin_lock(&channels_lock); /* protect access of calls[] */ spin_lock_bh(&dev->chan_lock); ret = -EBUSY; if (dev->bundle) goto out; ret = -ENOENT; if (callid >= B_MAX_DEV) goto out; other = calls[callid]; ret = -ENOENT; if (!other) goto out; ret = -EBUSY; if (dev->multihop_other) goto out; ret = -EBUSY; if (other->multihop_other) goto out; other->multihop_other = dev; dev->multihop_other = other; pr_debug("setup_multihop(%p/%s, %p/%s)\n", dev, dev->ch->device_name, other, other->ch->device_name); spin_unlock_bh(&dev->chan_lock); spin_unlock(&channels_lock); return 0; out: spin_unlock_bh(&dev->chan_lock); spin_unlock(&channels_lock); return ret; } /* * Called when a program makes an ioctl call against one of our * file nodes */ long babylon_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct chan *dev = filp->private_data; return ch_ioctl(filp->f_dentry->d_inode, filp, dev, cmd, arg); } EXPORT_SYMBOL_GPL(babylon_unlocked_ioctl); int babylon_ioctl(struct inode *ino, struct file *filp, unsigned int cmd, unsigned long arg) { struct chan *dev = filp->private_data; return ch_ioctl(ino, filp, dev, cmd, arg); } EXPORT_SYMBOL_GPL(babylon_ioctl); int ch_ioctl(struct inode *ino, struct file *filp, struct chan *dev, unsigned int cmd, unsigned long arg) { struct bundle *b; channel_t *ch; int i; pr_debug("babylon_ioctl(%p, %08x, %08lx)\n", dev, cmd, arg); if (BIOCGETCHNAME == cmd) { if (!dev) return -ENODEV; if (copy_to_user((void *)arg, dev->ch->device_name, 1 + strlen(dev->ch->device_name))) return -EFAULT; return 0; } else if (BIOCGETCHCLASS == cmd) { if (!dev) return -ENODEV; if (!dev->ch) return -EPIPE; if (copy_to_user((void *)arg, dev->ch->dev_class, 1 + strlen(dev->ch->dev_class))) return -EFAULT; return 0; } else if (BIOCDIAL == cmd) { char tmp[32]; if (dev && (!dev->ch || !dev->ch->use_existing_call)) return -EBUSY; if (copy_from_user(tmp, (void *)arg, 31)) return -EFAULT; tmp[31] = 0; return b_dial(MINOR(ino->i_rdev), filp, tmp); } else if (BIOCANSWER == cmd) { if (!dev) { int ret; dev = get_call(MINOR(ino->i_rdev), &ret); if (!dev) return ret; filp->private_data = dev; } dev->user_calltypes = arg; return 0; } else if (BIOCGETCALLID == cmd) { if (!dev) return -ENODEV; return dev->callid; } else if (BIOCGETDEVID == cmd) { if (!dev) return -ENODEV; b = dev->bundle; if (!b) { struct chan *cur = dev; do { if (cur->allocd_bundle) return cur->allocd_bundle->index; cur = cur->next; } while (cur && cur != dev) ; return -EUNATCH; } return b->index; } else if (BIOC_SET_MULTIHOP == cmd) { if (!dev || !dev->ch) return -ENODEV; return setup_multihop(dev, arg); } else if (BIOC_SET_MTU == cmd) { if (!dev) return -ENODEV; if (arg < 576 || arg > 65535) return -EINVAL; dev->mtu = arg; return 0; } else if (BIOCJOINBUNDLE == cmd) { return ch_JoinBundle(dev, arg); } else if (BIOCLEAVEBUNDLE == cmd) { struct bundle *b; if (!dev) return -ENODEV; spin_lock_bh(&dev->chan_lock); b = dev->bundle; if (!b) { spin_unlock_bh(&dev->chan_lock); return -EUNATCH; } spin_lock(&b->bundle_lock); leave_bundle(dev); spin_unlock(&b->bundle_lock); spin_unlock_bh(&dev->chan_lock); return 0; } else if (BIOC_SETLCFL == cmd) { if (!dev) return -ENODEV; dev->lflags = arg; if (dev->bundle) { local_irq_disable(); kick_tx(dev); local_irq_enable(); } return 0; } else if (BIOC_GETLCFL == cmd) { if (!dev) return -ENODEV; i = dev->lflags; if (copy_to_user((void *)arg, &i, sizeof(i))) return -EFAULT; return 0; } else if (BIOC_SETRCFL == cmd) { if (!dev) return -ENODEV; dev->rflags = arg; if (dev->bundle) { local_irq_disable(); kick_tx(dev); local_irq_enable(); } return 0; } else if (BIOC_GETRCFL == cmd) { if (!dev) return -ENODEV; i = dev->rflags; if (copy_to_user((void *)arg, &i, sizeof(i))) return -EFAULT; return 0; } else if (BIOCHANGUP == cmd) { if (dev) { ch = dev->ch; i = 0; local_irq_disable(); if (dev->ch && dev->user_active) { dev->user_active = 0; local_irq_enable(); i = ch->Hangup(dev->ch); } else local_irq_enable(); return i; } if (!ino || ((MINOR(ino->i_rdev) >= B_MAX_DEV) || !(ch = channels[MINOR(ino->i_rdev)]))) return -ENODEV; local_irq_disable(); i = ch->Hangup(ch); local_irq_enable(); return i; } else if (BIOC_SETCALLTYPE == cmd) { if (dev && dev->ch) ch = dev->ch; else if (!ino || ((MINOR(ino->i_rdev) >= B_MAX_DEV) || !(ch = channels[MINOR(ino->i_rdev)]))) return -ENODEV; ch->callType = arg; return 0; } else if (BIOC_GETCALLTYPE == cmd) { if (dev && dev->ch) ch = dev->ch; else if (!ino || ((MINOR(ino->i_rdev) >= B_MAX_DEV) || !(ch = channels[MINOR(ino->i_rdev)]))) return -ENODEV; if (copy_to_user((void *)arg, &ch->callType, sizeof(int))) return -EFAULT; return 0; } else if (BIOC_GETCAUSECODE == cmd) { if (!dev) return -ENODEV; if (copy_to_user((void *)arg, &dev->user_status, sizeof(int))) return -EFAULT; return 0; } else if (BIOC_GET_CALLING_NUMBER == cmd) { if (!ino || ((MINOR(ino->i_rdev) >= B_MAX_DEV) || !(ch = channels[MINOR(ino->i_rdev)]))) return -ENODEV; pr_debug("CallerNumber: %s\n", ch->CallerNumber); if (copy_to_user((void *)arg, &ch->CallerNumber, sizeof(ch->CallerNumber))) return -EFAULT; return 0; } else if (BIOC_GET_CALLED_NUMBER == cmd) { if (!ino || ((MINOR(ino->i_rdev) >= B_MAX_DEV) || !(ch = channels[MINOR(ino->i_rdev)]))) return -ENODEV; pr_debug("CalledNumber: %s\n", ch->CalledNumber); if (copy_to_user((void *)arg, &ch->CalledNumber, sizeof(ch->CalledNumber))) return -EFAULT; return 0; } else if (BIOC_GET_MAX_MRU == cmd) { if (!ino || ((MINOR(ino->i_rdev) >= B_MAX_DEV) || !(ch = channels[MINOR(ino->i_rdev)]))) return -ENODEV; i = ch->mru; if (copy_to_user((void *)arg, &i, sizeof(int))) return -EFAULT; return 0; } else if (BIOCCREATEBUNDLE == cmd) { if (dev->allocd_bundle) return -EINVAL; dev->allocd_bundle = alloc_bundle(arg, dev->net, NULL); if (!dev->allocd_bundle) return -ENOMEM; return 0; } else if (BIOCCREATEBUNDLENAME == cmd) { char name[32]; if (dev->allocd_bundle) return -EINVAL; if (strncpy_from_user(name, (void *)arg, 31) < 0) return -EFAULT; name[31] = 0; dev->allocd_bundle = alloc_bundle(-1, dev->net, name); if (!dev->allocd_bundle) return -ENOMEM; return 0; } else if (BIOCDESTROYBUNDLE == cmd) { struct bundle *b; if (!dev) return -EPIPE; spin_lock_bh(&dev->chan_lock); b = dev->allocd_bundle; if (!b) { spin_unlock_bh(&dev->chan_lock); return -EINVAL; } spin_lock(&b->bundle_lock); if (b->chan) { printk("bundle(%s) still has channels!\n", b->name); spin_unlock(&b->bundle_lock); spin_unlock_bh(&dev->chan_lock); return -EBUSY; } dev->allocd_bundle = NULL; spin_unlock(&b->bundle_lock); spin_unlock_bh(&dev->chan_lock); free_bundle(b); return 0; #ifdef CONFIG_NET_NS } else if (BIOCSETBUNDLENETNS == cmd) { struct net *net = get_net_ns_by_pid(arg), *old; if (IS_ERR(net)) return PTR_ERR(net); old = dev->net; dev->net = net; if (old) put_net(old); return 0; #endif } else if (BIOC_GET_LINK_STATS == cmd) { struct bdev_stats stats; memset(&stats, 0, sizeof(stats)); ch = dev->ch; if (ch) stats = ch->stats; else stats = dev->final_stats; if (copy_to_user((void *)arg, &stats, sizeof(stats))) return -EFAULT; return 0; } else if (BIOC_GET_IDLE_MS == cmd) { unsigned long idle_ms; if (!dev) return -ENODEV; idle_ms = jiffies_to_msecs(jiffies - dev->activity_jiffies); if (copy_to_user((void *)arg, &idle_ms, sizeof(idle_ms))) return -EFAULT; return 0; } if (dev->allocd_bundle) { int ret = bdev_ioctl(dev->allocd_bundle, cmd, arg); if (ret != -ENOSYS) return ret; } if (dev->bundle) { int ret = bdev_ioctl(dev->bundle, cmd, arg); if (ret != -ENOSYS) return ret; } if (!ino) return -ENOSYS; if (((MINOR(ino->i_rdev) >= B_MAX_DEV) || !(ch = channels[MINOR(ino->i_rdev)]))) return -ENOSYS; return ch->ioctl(ch, cmd, arg); } static int b_wait_for_call(struct file *filp, unsigned idx) { DECLARE_WAITQUEUE(wait, current); struct chan *call; int ret = 0; pr_debug("b_wait_for_call(%u)\n", idx); call = filp->private_data = get_call(idx, &ret); if (!filp->private_data) return ret; /* * This is the race free way of doing things. */ call->user_calltypes = ~0; current->state = TASK_INTERRUPTIBLE; add_wait_queue(&call->wait, &wait); if (call->user_calltypes && !call->user_active) { schedule(); if (signal_pending(current)) ret = -ERESTARTSYS; } current->state = TASK_RUNNING; remove_wait_queue(&call->wait, &wait); if (ret) { put_call(call); filp->private_data = NULL; } return ret; } /* * Called when a program opens one of our device nodes */ static int b_open(struct inode *ino, struct file *filp) { int ret = 0; channel_t *ch; pr_debug("b_open(%p) bab%d\n", filp, MINOR(ino->i_rdev)); filp->private_data = NULL; if (((MINOR(ino->i_rdev) >= B_MAX_DEV) || !(ch = channels[MINOR(ino->i_rdev)]))) return -ENODEV; MOD_INC_USE_COUNT; ch->use(ch); /* non blocking mode means that the user just wants the fd to do some funky * ioctls right away. Worry about calls later on... */ if (!(O_NONBLOCK & filp->f_flags)) ret = b_wait_for_call(filp, MINOR(ino->i_rdev)); if (ret) { ch->unuse(ch); MOD_DEC_USE_COUNT; } pr_debug("b_open(%p)=%d %p complete.\n", filp, ret, filp->private_data); return ret; } /* * Called when a program closes one of our device nodes */ int babylon_release(struct inode *ino, struct file *filp) { int ret = -ENODEV; struct chan *dev; channel_t *ch; dev = filp->private_data; if (!dev) goto out; ret = babylon_release_call(dev, filp); out: ch = channels[MINOR(ino->i_rdev)]; ch->unuse(ch); return ret; } EXPORT_SYMBOL_GPL(babylon_release); int babylon_release_call(struct chan *dev, struct file *filp) { pr_debug("babylon_release(%p) %p\n", filp, filp->private_data); filp->private_data = NULL; /* hang up the call if this is the last user * -- count is 2 'cause this filp still uses it as well as the call... */ #if 0 if (1 == dev->use_count) { dev->user_calltypes = 0; dev->lflags = 0; dev->rflags = 0; } #endif pr_debug("babylon_release_call(%p), %d\n", dev, dev->use_count); put_call(dev); return 0; } EXPORT_SYMBOL_GPL(babylon_release_call); #if LINUX_VERSION_CODE < 0x20100 static int b_select(struct inode *ino, struct file *filp, int sel_type, select_table *wait) { struct chan *dev = filp->private_data; int ret = 0; pr_debug("b_select: dev=%p\n", dev); if (!dev) return 0; if (SEL_IN == sel_type) { ret = !dev->ch || !skb_queue_empty(&dev->rx_q) || !dev->user_active; if (!ret) select_wait(&dev->rx_wait, wait); } else if (SEL_OUT == sel_type) { unsigned long flags; local_irq_save(flags); local_irq_disable(); ret = !dev->ch || (dev->ch && CS_CONNECTED == dev->ch->state && skb_queue_len(&dev->tx_q) < CH_TX_Q_LEN); local_irq_restore(flags); if (!ret) select_wait(&dev->tx_wait, wait); } return ret; } #else unsigned int babylon_poll(struct file *filp, poll_table *wait) { struct chan *dev = filp->private_data; unsigned long flags; unsigned int mask; pr_debug("babylon_poll: dev=%p\n", dev); if (!dev) return 0; poll_wait(filp, &dev->wait, wait); mask = 0; if (!dev->ch || !skb_queue_empty(&dev->rx_q) || !dev->user_active) mask |= POLLIN | POLLRDNORM; local_irq_save(flags); local_irq_disable(); /* irq might pull out ch */ if (!dev->ch || (dev->ch && CS_CONNECTED == dev->ch->state && skb_queue_len(&dev->tx_q) < CH_TX_Q_LEN)) mask |= POLLOUT | POLLWRNORM; local_irq_restore(flags); return mask; } EXPORT_SYMBOL_GPL(babylon_poll); #endif ssize_t babylon_read(struct file *filp, char __user *buf, size_t len, loff_t *off) { DECLARE_WAITQUEUE(wait, current); struct chan *dev = filp->private_data; struct sk_buff *skb = NULL; int ret; if (!dev) return -ENODEV; ret = 0; current->state = TASK_INTERRUPTIBLE; add_wait_queue(&dev->rx_wait, &wait); while (NULL == (skb = skb_dequeue(&dev->rx_q)) && dev->user_active && dev->ch) { ret = -EAGAIN; if (O_NONBLOCK & filp->f_flags) break; schedule(); ret = -ERESTARTSYS; if (signal_pending(current)) break; ret = 0; } current->state = TASK_RUNNING; remove_wait_queue(&dev->wait, &wait); if (skb) { if (skb->len < len) len = skb->len; if (copy_to_user(buf, skb->data, len)) len = -EFAULT; b_kfree_skb(skb); } else len = ret; return len; } EXPORT_SYMBOL_GPL(babylon_read); ssize_t babylon_write(struct file *filp, const char __user *buf, size_t len, loff_t *off) { DECLARE_WAITQUEUE(wait, current); struct sk_buff *skb = NULL; struct chan *dev; char *tmpbuf; int ret; ret = -EINVAL; if (len <= 0 || len > 8000) goto out; skb = alloc_skb(len, GFP_KERNEL); ret = -ENOMEM; if (!skb) goto out; #if LINUX_VERSION_CODE < 0x20100 skb->free = FREE_READ; #endif dev = filp->private_data; ret = -ENODEV; if (!dev) goto out; tmpbuf = skb_put(skb, len); ret = -EFAULT; if (copy_from_user(tmpbuf, buf, len)) goto out; ret = -EPIPE; current->state = TASK_INTERRUPTIBLE; add_wait_queue(&dev->tx_wait, &wait); spin_lock_bh(&dev->chan_lock); while (dev->ch && dev->user_active && (CS_CONNECTED == dev->ch->state) && skb_queue_len(&dev->tx_q) >= CH_TX_Q_LEN) { spin_unlock_bh(&dev->chan_lock); if (O_NONBLOCK & filp->f_flags) { ret = -EAGAIN; break; } pr_debug("%s: write: sleeping len=%d\n", dev->ch->device_name, skb_queue_len(&dev->tx_q)); schedule(); ret = -ERESTARTSYS; if (signal_pending(current)) break; ret = -EPIPE; spin_lock_bh(&dev->chan_lock); } if (-ERESTARTSYS != ret && dev->ch && dev->user_active && (CS_CONNECTED == dev->ch->state) && (skb_queue_len(&dev->tx_q) < CH_TX_Q_LEN)) { pr_debug("%s: write: putting packet on queue\n", dev->ch->device_name); skb_queue_tail(&dev->tx_q, skb); kick_tx(dev); ret = 0; } spin_unlock_bh(&dev->chan_lock); current->state = TASK_RUNNING; remove_wait_queue(&dev->tx_wait, &wait); if (-EBUSY == ret) ret = -EAGAIN; out: if (ret && skb) b_dev_kfree_skb(skb); return ret ? ret : len; } EXPORT_SYMBOL_GPL(babylon_write); static const struct file_operations b_fops = { #if LINUX_VERSION_CODE >= 0x02032B .owner = THIS_MODULE, #endif .read = babylon_read, .write = babylon_write, #if LINUX_VERSION_CODE < 0x20100 .select = b_select, #else .poll = babylon_poll, #endif .unlocked_ioctl = babylon_unlocked_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = babylon_unlocked_ioctl, #endif .open = b_open, .release = babylon_release, }; static int bdev_ioctl(struct bundle *b, unsigned int cmd, unsigned long arg) { int i; if (BIOC_SETLBFL == cmd) { b->lflags = arg; return 0; } else if (BIOC_GETLBFL == cmd) { i = b->lflags; if (copy_to_user((void *)arg, &i, sizeof(int))) return -EFAULT; return 0; } else if (BIOC_SETRBFL == cmd) { b->rflags = arg; return 0; } else if (BIOC_GETRBFL == cmd) { i = b->rflags; if (copy_to_user((void *)arg, &i, sizeof(int))) return -EFAULT; return 0; } else if (BIOC_GETBSTATS == cmd) { struct bdev_stats stats; memset(&stats, 0, sizeof(stats)); stats.tx_bytes = b->tx_bytes; stats.rx_bytes = b->rx_bytes; if (copy_to_user((void *)arg, &stats, sizeof(stats))) return -EFAULT; return 0; } else if (BIOCGETDEVID == cmd) { return b->index; } else if (BIOC_SET_MIN_FRAGS == cmd) { if (arg < 1) return -EINVAL; b->min_frags = arg; return 0; } else if (BIOC_SET_MAX_FRAGS == cmd) { if (arg < 1) return -EINVAL; b->max_frags = arg; return 0; } else if (BIOC_GET_MIN_FRAGS == cmd) { if (copy_to_user((void *)arg, &b->min_frags, sizeof(b->min_frags))) return -EFAULT; return 0; } else if (BIOC_GET_MAX_FRAGS == cmd) { if (copy_to_user((void *)arg, &b->max_frags, sizeof(b->max_frags))) return -EFAULT; return 0; } return -ENOSYS; } void __ch_input(channel_t *ch, struct sk_buff *skb) { pr_debug("b_Input(%s, %p len=%d)\n", ch->device_name, skb, skb->len); #if LINUX_VERSION_CODE < 0x20100 skb->free = 1; #endif if (!ch->link) { ch->stats.rx_dropped++; skb->dev = NULL; b_kfree_skb(skb); return; } skb->dev = ch->link; pr_debug("__ch_input: dev=%p\n", ch->link); rx_skb(skb); return; } /* * ch_Input also accounts for incoming packets on a channel. */ void ch_Input(channel_t *ch, struct sk_buff *skb) { /* FIXME: make this zero copy where possible */ if (!pskb_may_pull(skb, skb->len)) { ch->stats.rx_dropped++; return; } #if LINUX_VERSION_CODE < 0x20100 skb->free = 1; #endif __ch_input(ch, skb); #if 0 skb->mac.raw = skb->data; skb->dev = &ch->ndev; skb->protocol = ETH_P_PPP; netif_rx(skb); #endif } static int b_input(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) , struct net_device *orig_dev #endif ) { channel_t *ch = (channel_t *)dev; __ch_input(ch, skb); return 0; } static void b_Open(channel_t *ch) { pr_debug("b_Open(%s)\n", ch->device_name); } static void b_Close(channel_t *ch) { pr_debug("b_Close(%s)\n", ch->device_name); } static void b_Up(channel_t *ch) { struct chan *dev; pr_debug("b_Up(%s)\n", ch->device_name); if (!ch->link) setup_call(ch); if (!(dev = ch->link)) return; dev->user_active = 1; dev->dialing = 0; wake_up_interruptible(&dev->wait); wake_up_interruptible(&dev->tx_wait); if (test_busy(ch)) pr_debug("b_Up: busy 1\n"); kick_tx(dev); if (test_busy(ch)) pr_debug("b_Up: busy 2\n"); } static void b_Down(channel_t *ch) { struct chan *dev = ch->link; pr_debug("b_Down(%s)\n", ch->device_name); if (dev) { struct sk_buff *skb; while (NULL != (skb = skb_dequeue(&dev->tx_q))) b_kfree_skb(skb); dev->user_active = 0; wake_up_interruptible(&dev->rx_wait); wake_up_interruptible(&dev->tx_wait); wake_up_interruptible(&dev->wait); if (!ch->use_existing_call) drop_call(dev); } } static void b_ConnectComplete(channel_t *ch, int cause) { struct chan *dev = ch->link; pr_debug("b_ConnectComplete(%s, 0x%2x)\n", ch->device_name, cause); if (!dev) { pr_debug("b_ConnectComplete(%s, 0x%2x): No device.\n", ch->device_name, cause); if (!cause) { // dev->user_active = 0; ch->Hangup(ch); } return; } if (!cause) /* implicite Up */ dev->user_active = 1; else dev->user_active = 0; dev->dialing = 0; dev->user_status = cause; wake_up_interruptible(&dev->tx_wait); wake_up_interruptible(&dev->wait); if (cause) { wake_up_interruptible(&dev->rx_wait); if (!dev->use_count++) MOD_INC_USE_COUNT; if (!ch->use_existing_call) drop_call(dev); } } static void b_OutputComplete(channel_t *ch) { struct chan *dev = ch->link; pr_debug("%s: b_OutputComplete\n", ch->device_name); if (!dev) return; if (ch->state == CS_CONNECTED) kick_tx(dev); } int RegisterChannel(channel_t *ch) { static unsigned start_index; unsigned i, j; int ret; MOD_INC_USE_COUNT; ch->link = NULL; ch->Open = b_Open; ch->Close = b_Close; ch->Up = b_Up; ch->Down = b_Down; ch->OutputComplete = b_OutputComplete; ch->ConnectComplete = b_ConnectComplete; spin_lock(&channels_lock); j = start_index; for (i=0; idevice_name); MOD_DEC_USE_COUNT; return -ENOMEM; } start_index = (j + 1) % B_MAX_DEV; ch->channels_index = j; #if LINUX_VERSION_CODE < 0x02032B ch->ndev.name = ch->device_name; #else strncpy(ch->ndev.name, ch->device_name, IFNAMSIZ); #endif ch->ndev.type = ARPHRD_PPP; ret = 0; //register_netdev(&ch->ndev); if (!ret) channels[ch->channels_index] = ch; spin_unlock(&channels_lock); return ret; } void UnregisterChannel(channel_t *ch) { unsigned i = ch->channels_index; struct chan *call = NULL; if (i >= B_MAX_DEV || channels[i] != ch) { printk("UnregisterChannel(%p): channel(%s) not associated with device.\n", ch, ch->device_name); return; } b_Down(ch); b_Close(ch); //unregister_netdev(&ch->ndev); channels[i] = NULL; if (ch->link && ((struct chan *)ch->link)->multihop_other) { call = ((struct chan *)ch->link)->multihop_other; pr_debug("call(%p) use(%d) multihop_other(%p)\n", ch->link, ((struct chan *)ch->link)->use_count, call); } if (ch->link) drop_call(ch->link); if (call) pr_debug("other->multihop_other = %p\n", call->multihop_other); MOD_DEC_USE_COUNT; pr_debug("UnregisterChannel: freed.\n"); } unsigned int RegisterDeviceClass(char *name) { pr_debug("RegisterDeviceClass(%s)\n", name); return 0; } void UnregisterDeviceClass(unsigned int class) { pr_debug("UnregisterDeviceClass(%u)\n", class); } static char *chanStates[] = { "idle", "dialing", "ringing", "connecting", "connected", "disconnecting", "disconnected", "stalled", "unavailable" }; static void channel_seq_print(struct seq_file *seq, channel_t **ch_p) { struct chan *call; channel_t *ch; spin_lock(&channels_lock); ch = *ch_p; if (!ch) goto out; call = ch->link; seq_printf(seq, "%-3u %-15s %-10s %-16s %-7s %5x %4d %7lx %12llu %15llu %12llu %15llu %5d\n", ch->index, ch->device_name, ch->dev_class, ch->state > CS_UNAVAIL ? "unknown" : chanStates[ch->state], call && call->bundle ? call->bundle->name : "none", call ? call->lflags : 0, call ? call->use_count : 0, (long)test_busy(ch), /* must be long due to x86-64 compiler bug. */ (unsigned long long)ch->stats.rx_packets, (unsigned long long)ch->CH_rx_bytes, (unsigned long long)ch->stats.tx_packets, (unsigned long long)ch->CH_tx_bytes, call ? call->mtu : 0 ); out: spin_unlock(&channels_lock); } static int channel_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_printf(seq, "%-3s %-15s %-10s %-16s %-7s %-5s %-4s %-7s %-12s %-15s %-12s %-15s %-5s\n", "dev", "name", "class", "state", "dev", "flags", "use", "busy", "rxpkt", "rx_b", "txpkt", "tx_b", "mtu" ); else channel_seq_print(seq, v); return 0; } static void *channel_from_index(struct seq_file *seq, loff_t *pos) { spin_lock(&channels_lock); if (*pos < 1) *pos = 1; while (*pos <= B_MAX_DEV) { channel_t **ch_p = &channels[*pos - 1]; if (*ch_p) { spin_unlock(&channels_lock); return ch_p; } ++*pos; } spin_unlock(&channels_lock); return NULL; } static void *channel_seq_start(struct seq_file *seq, loff_t *pos) { if (!*pos) return SEQ_START_TOKEN; if (*pos > B_MAX_DEV) return NULL; return channel_from_index(seq, pos); } static void *channel_seq_next(struct seq_file *seq, void *v, loff_t *pos) { ++*pos; return channel_from_index(seq, pos); } static void channel_seq_stop(struct seq_file *seq, void *v) { return; } static struct seq_operations channel_seq_ops = { .start = channel_seq_start, .next = channel_seq_next, .stop = channel_seq_stop, .show = channel_seq_show, }; static int channel_seq_open(struct inode *inode, struct file *file) { return seq_open(file, &channel_seq_ops); } static const struct file_operations proc_channel_fops = { .owner = THIS_MODULE, .open = channel_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static void mpq_seq_print(struct seq_file *seq, struct bundle **b_p) { struct bundle *b; unsigned i; spin_lock(&b_devs_lock); b = ACCESS_ONCE(*b_p); if (!b || (b == B_DEVS_REGISTERING)) goto out; seq_printf(seq, "%s:", b->name); for (i=0; ifrags[i]; skb; skb = skb->next) seq_printf(seq, " %08x", *(u32 *)skb->data); } seq_putc(seq, '\n'); out: spin_unlock(&b_devs_lock); } static int mpq_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) return 0; else mpq_seq_print(seq, v); return 0; } static void *mpq_from_index(struct seq_file *seq, loff_t *pos) { spin_lock(&b_devs_lock); if (*pos < 1) *pos = 1; while (*pos <= B_MAX_DEV) { struct bundle **b_p = &b_devs[*pos - 1]; if (*b_p && (*b_p != B_DEVS_REGISTERING)) { spin_unlock(&b_devs_lock); return b_p; } ++*pos; } spin_unlock(&b_devs_lock); return NULL; } static void *mpq_seq_start(struct seq_file *seq, loff_t *pos) { if (!*pos) return SEQ_START_TOKEN; if (*pos > B_MAX_DEV) return NULL; return mpq_from_index(seq, pos); } static void *mpq_seq_next(struct seq_file *seq, void *v, loff_t *pos) { ++*pos; return mpq_from_index(seq, pos); } static void mpq_seq_stop(struct seq_file *seq, void *v) { return; } static struct seq_operations mpq_seq_ops = { .start = mpq_seq_start, .next = mpq_seq_next, .stop = mpq_seq_stop, .show = mpq_seq_show, }; static int mpq_seq_open(struct inode *inode, struct file *file) { return seq_open(file, &mpq_seq_ops); } static const struct file_operations proc_mpq_fops = { .owner = THIS_MODULE, .open = mpq_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static void interface_seq_print(struct seq_file *seq, struct bundle **b_p) { struct bundle *b; struct chan *chan; spin_lock(&b_devs_lock); b = ACCESS_ONCE(*b_p); if (!b || (b == B_DEVS_REGISTERING)) goto out; seq_printf(seq, "%-7s %-4s %7u %5x %-5s", b->name, b->bdev->flags & IFF_UP ? "yes" : "no", b->num_frags, b->lflags, netif_queue_stopped(b->bdev) ? "yes" : "no"); chan = b->chan; do { if (!chan) break; seq_printf(seq, " %s", chan->ch ? chan->ch->device_name : NULL); chan = chan->next; } while (chan != b->chan) ; seq_putc(seq, '\n'); out: spin_unlock(&b_devs_lock); } static int interface_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_printf(seq, "%-7s %-4s %-7s %-5s %-5s %s\n", "dev", "up", "mpFLen", "flags", "pause", "channels" ); else interface_seq_print(seq, v); return 0; } static void *interface_from_index(struct seq_file *seq, loff_t *pos) { spin_lock(&b_devs_lock); if (*pos < 1) *pos = 1; while (*pos <= B_MAX_DEV) { struct bundle **b_p = &b_devs[*pos - 1]; if (*b_p && (*b_p != B_DEVS_REGISTERING)) { spin_unlock(&b_devs_lock); return b_p; } ++*pos; } spin_unlock(&b_devs_lock); return NULL; } static void *interface_seq_start(struct seq_file *seq, loff_t *pos) { if (!*pos) return SEQ_START_TOKEN; if (*pos > B_MAX_DEV) return NULL; return interface_from_index(seq, pos); } static void *interface_seq_next(struct seq_file *seq, void *v, loff_t *pos) { ++*pos; return interface_from_index(seq, pos); } static void interface_seq_stop(struct seq_file *seq, void *v) { return; } static struct seq_operations interface_seq_ops = { .start = interface_seq_start, .next = interface_seq_next, .stop = interface_seq_stop, .show = interface_seq_show, }; static int interface_seq_open(struct inode *inode, struct file *file) { return seq_open(file, &interface_seq_ops); } static const struct file_operations proc_interface_fops = { .owner = THIS_MODULE, .open = interface_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static void call_seq_print(struct seq_file *seq, struct chan **call_p) { struct chan *call; spin_lock(&channels_lock); call = *call_p; if (!call) goto out; seq_printf(seq, "%3d %3d %3d %s\n", (int)(call_p - calls), call->use_count, call->idx, call->ch ? call->ch->device_name : "none"); out: spin_unlock(&channels_lock); } static int call_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_printf(seq, "i use idx ch\n"); else call_seq_print(seq, v); return 0; } static void *call_from_index(struct seq_file *seq, loff_t *pos) { spin_lock(&channels_lock); if (*pos < 1) *pos = 1; while (*pos <= B_MAX_DEV) { struct chan **call_p = &calls[*pos - 1]; if (*call_p) { spin_unlock(&channels_lock); return call_p; } ++*pos; } spin_unlock(&channels_lock); return NULL; } static void *call_seq_start(struct seq_file *seq, loff_t *pos) { if (!*pos) return SEQ_START_TOKEN; if (*pos > B_MAX_DEV) return NULL; return call_from_index(seq, pos); } static void *call_seq_next(struct seq_file *seq, void *v, loff_t *pos) { ++*pos; return call_from_index(seq, pos); } static void call_seq_stop(struct seq_file *seq, void *v) { return; } static struct seq_operations call_seq_ops = { .start = call_seq_start, .next = call_seq_next, .stop = call_seq_stop, .show = call_seq_show, }; static int call_seq_open(struct inode *inode, struct file *file) { return seq_open(file, &call_seq_ops); } static const struct file_operations proc_call_fops = { .owner = THIS_MODULE, .open = call_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static char devName[] = "bab"; static struct packet_type ppp_pt = { .type = ETH_P_PPP, .func = b_input, }; static struct class *bab_class; int init_module(void) { int ret; int i; printk(KERN_INFO "Babylon v" VER " Copyright 1999 Spellcast Telecommunications Inc.\n"); skb_queue_head_init(&rx_q); chan_cachep = kmem_cache_create("babylon_chan", sizeof(struct chan), 0, 0, NULL); if (!chan_cachep) return -ENOMEM; proc_create_data("bab_chan", 0444, NULL, &proc_channel_fops, NULL); proc_create_data("bab_dev", 0444, NULL, &proc_interface_fops, NULL); proc_create_data("bab_mpq", 0444, NULL, &proc_mpq_fops, NULL); proc_create_data("bab_call", 0444, NULL, &proc_call_fops, NULL); ret = register_chrdev(BAB_MAJOR, devName, &b_fops); if (!ret) dev_add_pack(&ppp_pt); bab_class = class_create(THIS_MODULE, "bab"); for (i=0; i= KERNEL_VERSION(2,6,27) NULL, #endif "bab%d", i); } return ret; } void cleanup_module(void) { int i; spin_lock(&b_devs_lock); for (i=0; i= 0x20600 remove_proc_entry("bab_chan", NULL); remove_proc_entry("bab_dev", NULL); remove_proc_entry("bab_mpq", NULL); remove_proc_entry("bab_call", NULL); #elif LINUX_VERSION_CODE < 0x2031B proc_unregister(&proc_root, proc_babylon_call.low_ino); proc_unregister(&proc_root, proc_babylon_mpq.low_ino); proc_unregister(&proc_root, proc_babylon_interfaces.low_ino); proc_unregister(&proc_root, proc_babylon_channels.low_ino); #else remove_proc_entry("bab_chan", &proc_root); remove_proc_entry("bab_dev", &proc_root); remove_proc_entry("bab_mpq", &proc_root); remove_proc_entry("bab_call", &proc_root); #endif kmem_cache_destroy(chan_cachep); printk(KERN_INFO "Babylon unloaded.\n"); } #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)) EXPORT_SYMBOL(RegisterChannel); EXPORT_SYMBOL(UnregisterChannel); EXPORT_SYMBOL(ch_ioctl); EXPORT_SYMBOL(ch_Input); #endif MODULE_LICENSE("GPL");