/*
 * kern.c - Core Kernel module
 * Copyright (C) 1997-2000 SpellCaster Telecommunications Inc.
 * $Id: kern.c,v 1.13 2004/08/24 01:46:14 bcrl Exp $
 * Released under the GNU Public License. See LICENSE file for details.
 *
 * NOTE(review): this copy of the file has been damaged by markup stripping:
 * text between '<' and the next '>' was deleted in several places (the
 * <linux/...> header names below, and several "i < b->..." loop headers).
 * Damaged spots are flagged with NOTE(review) comments; recover the missing
 * text from a pristine copy of the source before building.
 */
#include "../include/bab_module.h"
/* NOTE(review): the 16 angle-bracket header names were lost (likely
 * linux/module.h, linux/kernel.h, linux/slab.h, linux/netdevice.h,
 * linux/skbuff.h, linux/if_arp.h, etc.) — TODO restore from original. */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "../include/vercomp.h"
#include "../include/aps_if.h"
#include "bab.h"

#ifndef VER
#define VER "test"
#endif

#if LINUX_VERSION_CODE > 0x20100
/* NOTE(review): header name lost here as well (probably <linux/poll.h>). */
#include
#endif

/* Dropped-packet diagnostics share the pr_debug channel. */
#define pr_drop pr_debug

#define BAB_MAJOR 60
#define BPPP_MAJOR 62

/* PPP protocol field values (RFC 1661 / RFC 1990 assigned numbers). */
#define PPP_PROTO_IP    0x21
#define PPP_PROTO_IPX   0x2b
#define PPP_PROTO_VJ_C  0x2d	/* VJ compressed TCP/IP */
#define PPP_PROTO_VJ_UN 0x2f	/* VJ uncompressed TCP/IP */
#define PPP_PROTO_MP    0x3d	/* multilink fragment */

#define B_MAX_DEV (256*1024U)
#define BPPP_TX_Q_LEN 2
#define CH_TX_Q_LEN 3

static struct kmem_cache *chan_cachep;		/* allocator for struct chan */
static struct bundle *b_devs[B_MAX_DEV];	/* bundle index -> bundle */
static channel_t *channels[B_MAX_DEV];		/* minor number -> channel */
static struct chan *calls[B_MAX_DEV];		/* call id -> call */
static unsigned callid;				/* next call id to try */
static struct sk_buff_head rx_q;

static void put_call(struct chan *call);
static void leave_bundle(struct chan *ch);
static void demux_pkt(struct bundle *b, struct sk_buff *skb);
static void __b_xmit(struct bundle *b, struct sk_buff *skb, u16 proto);
static int bdev_ioctl(struct bundle *b, unsigned int cmd, unsigned long arg);

/*
 * Carve the next multilink fragment off b->frag_skb into a fresh skb,
 * prepending the (optionally PFC-compressed) MP protocol field and a
 * short (12-bit) or long (24-bit) sequence header depending on BF_SSN.
 * 'first' marks the begin-fragment; the end bit is set when this is the
 * last piece. Returns NULL on allocation failure (the remaining payload
 * is dropped and the sequence number bumped so the peer sees the loss).
 */
static struct sk_buff *make_frag(struct bundle *b, int first)
{
	struct sk_buff *frag_skb, *skb;
	int left = b->frag_split - b->frag_num;	/* fragments still to make */
	unsigned char *p;
	int bytes;

	if (left < 1)
		left = 1;
	frag_skb = b->frag_skb;
	/* spread the remaining payload evenly over the remaining fragments */
	bytes = (frag_skb->len - b->frag_offset) / left;
	skb = dev_alloc_skb(bytes);
	if (!skb)
		goto out;

	/* room for protocol field (1 or 2 bytes) + MP sequence header */
	p = skb_push(skb, 1 + (!(b->rflags & BF_PFC) ? 1 : 0) + ((b->rflags & BF_SSN) ? 2 : 4));
	if (!(b->rflags & BF_PFC))
		*p++ = 0;
	*p++ = PPP_PROTO_MP;
	if (b->rflags & BF_SSN) {
		u16 val = (b->frag_seq | ((left == 1) ? 0x4000 : 0) | (first ? 0x8000 : 0));
		/* I don't cast to u16 here as we're unaligned here */
		*p++ = (val >> 8);
		*p++ = val;
		b->frag_seq = (b->frag_seq + 1) & 0xfff;
	} else {
		u32 val = (b->frag_seq | ((left == 1) ? 0x40000000 : 0) | (first ? 0x80000000 : 0));
		*p++ = (val >> 24);
		*p++ = (val >> 16);
		*p++ = (val >> 8);
		*p++ = val;
		b->frag_seq = (b->frag_seq + 1) & 0xffffff;
	}
#if LINUX_VERSION_CODE < 0x20100
	skb->free = FREE_READ;
#endif
	/* this is an overly paranoid check. it shouldn't happen,
	 * but i'd rather not reboot if there's a bug somewhere. */
	if (bytes < 0)
		goto out_doh;
	memcpy(skb_put(skb, bytes), frag_skb->data + b->frag_offset, bytes);
	//printk("made frag len=%d off=%d num=%d seq=%08x\n", skb->len, b->frag_offset, b->frag_num, b->frag_seq);
	b->frag_offset += bytes;
	b->frag_num++;
	if (1 == left) {
		/* that was the last piece: release the source packet */
		//printk("freed frag_skb len=%d\n", b->frag_skb->len);
		b_dev_kfree_skb(b->frag_skb);
		b->frag_skb = NULL;
	}
	/* The code path for the first packet already does ACFC for us. */
	if (!first && !(b->rflags & BF_ACFC)) {
		p = skb_push(skb, 2);
		*p++ = 0xff;
		*p++ = 0x03;
	}
	pr_debug("make_frag: len=%d, data=\n"
		 " %02x %02x %02x %02x %02x %02x %02x %02x\n"
		 " %02x %02x %02x %02x %02x %02x %02x %02x\n"
		 , skb->len,
		 skb->data[ 0], skb->data[ 1], skb->data[ 2], skb->data[ 3],
		 skb->data[ 4], skb->data[ 5], skb->data[ 6], skb->data[ 7],
		 skb->data[ 8], skb->data[ 9], skb->data[10], skb->data[11],
		 skb->data[12], skb->data[13], skb->data[14], skb->data[15]
		 );
	return skb;

out_doh:
	printk(KERN_ALERT "%s: oh no! bytes<0!!!\n", b->name);
	b_dev_kfree_skb(skb);
out:
	/* out of memory. damn. bump the sequence number (so the other end
	 * can detect a fragment loss) and drop the fragment to free up
	 * some memory. */
	b_dev_kfree_skb(b->frag_skb);
	b->frag_skb = NULL;
	b->frag_seq++;
	b->frag_seq &= (b->rflags & BF_SSN) ? 0xfff : 0xffffff;
	return NULL;
}

/*
 * Pull the next queued packet for the bundle and start transmitting it;
 * if the queue is empty, re-enable the net device's transmit queue.
 */
static inline void kick_tx_b(struct bundle *b)
{
	struct sk_buff *skb = skb_dequeue(&b->tx_q);
	if (skb) {
		wake_up_interruptible(&b->tx_wait);
		__b_xmit(b, skb, skb->protocol);
		return;
	}
#if LINUX_VERSION_CODE < 0x02032B
	clear_bit(0, &b->dev.tbusy);
	mark_bh(NET_BH);
#else
	netif_wake_queue(&b->dev);
#endif
}

/*
 * Drain a channel's private tx queue, then (if the channel belongs to a
 * bundle) push any pending bundle fragment out through it.
 */
static void kick_tx(struct chan *call)
{
	struct sk_buff *skb;
	if (test_busy(call->ch)) {
		pr_debug("%s: kick_tx: busy\n", call->ch->device_name);
		return;
	}
	/* First things first: attempt to transmit any packets that are
	 * queued on the channel. */
	while (!test_busy(call->ch) && (skb = skb_dequeue(&call->tx_q))) {
		pr_debug("%s: kick_tx: outputting\n", call->ch->device_name);
		if (!call->ch->Output(call->ch, skb)) {
			pr_debug("%s: kick_tx: good tx\n", call->ch->device_name);
			wake_up_interruptible(&call->tx_wait);
			wake_up_interruptible(&call->wait);
		} else {
			/* driver refused it: requeue and retry later */
			pr_debug("%s: kick_tx: output failed\n", call->ch->device_name);
			skb_queue_head(&call->tx_q, skb);
			return;
		}
	}
	/* now we send out any multilink packets that are queued for the bundle */
	if (!test_busy(call->ch) && call->bundle) {
		struct bundle *b = call->bundle;
		if (b->tx_skb && !call->ch->Output(call->ch, b->tx_skb)) {
			b->dev.trans_start = jiffies;
			b->tx_skb = NULL;
			/* make the next fragment, or move on to the next packet */
			if (!b->frag_skb || !(b->tx_skb = make_frag(b, 0)))
				kick_tx_b(b);
		}
	}
}

/*
 * Core bundle transmit path. Prepends the PPP protocol field (honouring
 * PFC), optionally splits the packet into multilink fragments across the
 * bundle's idle channels, and round-robins the pieces over the member
 * channels. Any piece that cannot be sent immediately is parked in
 * b->tx_skb for kick_tx() to finish. Caller must have interrupts off or
 * otherwise own the bundle (legacy 2.2-era locking).
 */
static void __b_xmit(struct bundle *b, struct sk_buff *skb, u16 proto)
{
	struct chan *ch;
	u8 *p;
	int i;
again:
	/* protocol-field compression: one byte if PFC negotiated and odd */
	if (proto <= 0xff && (b->rflags & BF_PFC) && (proto & 0x01))
		p = skb_push(skb, 1);
	else {
		p = skb_push(skb, 2);
		*p++ = proto >> 8;
	}
	*p++ = proto;
	pr_debug("b_net_xmit: len=%d, data=%02x %02x %02x %02x %02x %02x %02x %02x\n",
		 skb->len,
		 skb->data[0], skb->data[1], skb->data[2], skb->data[3],
		 skb->data[4], skb->data[5], skb->data[6], skb->data[7]
		 );
	/* HACK: we do this before dropping the packet so the daemon
	 * can determine if someone's trying to transmit.  -ben */
	b->tx_bytes += skb->len;
	ch = b->chan;
	if (!ch)
		goto drop;
	b->stats.tx_packets ++;
#if LINUX_VERSION_CODE >= 0x20100
	b->stats.tx_bytes += skb->len;
#endif
	/* Note that the way this is arranged causes all channels with
	 * multilink enabled to put multilink headers on all packets
	 * when more than one channel is in the bundle. This behaviour
	 * is REQUIRED to work around a bug in certain routers: they
	 * ignore IP packets w/o an ML header. It's also required for
	 * ordering purposes when using compression. -ben */
	if ((b->rflags & BF_PASS_ML) && (b->num_chan > 1)) {
		int num_idle = 0;
		/* okay, we're going to make a fragment */
		b->frag_offset = 0;
		b->frag_num = 0;
		b->frag_split = 0;
		/* NOTE(review): corrupted loop header — "inum_chan" was almost
		 * certainly "i < b->num_chan" before markup stripping. */
		for (i=0; inum_chan; i++, ch = ch->next) {
			if (!ch->tx_reserved && !test_busy(ch->ch))
				num_idle ++;
		}
		ch = b->chan;
		if (num_idle < 1)
			num_idle = b->num_chan;
		/* try to ensure that the smallest packet sent out is at
		 * least 40 bytes. This helps thruput on bundles with a
		 * large number of channels. */
		while (num_idle > 1 && (skb->len / num_idle) < 40)
			num_idle --;
		/* NOTE(review): the first assignment is a dead store — the
		 * ternary on the next line overwrites it unconditionally. */
		b->frag_split = num_idle;
		b->frag_split = (b->dev.mtu >= (skb->len-1) ? 1 : num_idle);
		b->frag_skb = skb;
		skb = make_frag(b, 1);
		if (!skb) {
			static int make_frag_warn;
			if (++make_frag_warn < 5)
				printk(KERN_DEBUG "%s: ugh -- make_frag failed\n", b->name);
			goto drop_nofree;
		}
	}
	if (!(b->rflags & BF_ACFC)) {
		/* not a fragment, so put ACFC on it if needed */
		p = skb_push(skb, 2);
		*p++ = 0xff;
		*p++ = 0x03;
	}
	i = 0;
	/* round-robin the fragment(s) over the member channels */
	while (i++ < b->num_chan) {
		struct chan *next = ch->next;
		if (ch->tx_reserved || test_busy(ch->ch) || ch->ch->Output(ch->ch, skb)) {
			ch = next;
			continue;
		}
		b->chan = next;
		i = 0;
		/* normal fast path -- we've still got fragments, and are
		 * successfully making the pieces to transmit */
		if (b->frag_skb && (skb = make_frag(b, 0)))
			continue;
		b->dev.trans_start = jiffies;
		pr_debug("b_net_xmit(end): len=%d, data=%02x %02x %02x %02x %02x %02x %02x %02x\n",
			 skb->len,
			 skb->data[0], skb->data[1], skb->data[2], skb->data[3],
			 skb->data[4], skb->data[5], skb->data[6], skb->data[7]
			 );
		/* check if any control skbs were queued */
		skb = skb_dequeue(&b->tx_q);
		if (skb) {
			wake_up_interruptible(&b->tx_wait);
			proto = skb->protocol;
			goto again;
		}
		/* our work here is done. */
#if LINUX_VERSION_CODE < 0x02032B
		clear_bit(0, &b->dev.tbusy);
		mark_bh(NET_BH);
#else
		netif_wake_queue(&b->dev);
#endif
		return;
	}
	/* no channel could take it right now; park it for kick_tx() */
	b->tx_skb = skb;
	return;
drop:
	b_dev_kfree_skb(skb);
drop_nofree:
	b->stats.tx_dropped++;
#if LINUX_VERSION_CODE < 0x02032B
	clear_bit(0, &b->dev.tbusy);
#else
	netif_wake_queue(&b->dev);
#endif
}

/*
 * net_device hard_start_xmit entry point for the bundle. Strips the
 * dummy header added by b_hard_header, maps the ethernet protocol to a
 * PPP protocol number (optionally applying VJ TCP header compression to
 * IP), and hands off to __b_xmit.
 */
static int b_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bundle *b = dev->priv;
	unsigned long flags;
	u16 proto;
	local_irq_save(flags);
	local_irq_disable();
#if LINUX_VERSION_CODE < 0x02032B
	if (test_and_set_bit(0, &dev->tbusy))
		goto tx_busy;
#else
	netif_stop_queue(dev);
#endif
	/* pull off the header we threw on in b_hard_header */
	skb_pull(skb, dev->hard_header_len);
	switch (ntohs(skb->protocol)) {
	case ETH_P_IP:
		proto = PPP_PROTO_IP;
		if (!(b->lflags & BF_PASS_IP))
			goto drop;
#if defined(CONFIG_SLHC_BUILTIN) || defined(CONFIG_SLHC_MODULE)
		{
			struct sk_buff *newskb;
			int newlen;
			u8 *cp;
			if (!(b->lflags & BF_VJC))
				break;
			newskb = dev_alloc_skb(skb->len + 128);
			if (!newskb)
				goto drop;
			cp = skb->data;
			newlen = slhc_compress(b->slhc, cp, skb->len, newskb->data, &cp, 1);
			if (newlen <= 0)
				goto drop;
			if (cp == skb->data) {
				/* unchanged, so transmit as normal ip */
				b_dev_kfree_skb(newskb);
				break;
			}
			if (newskb->data[0] & SL_TYPE_COMPRESSED_TCP) {
				//newskb->data[0] &= ~SL_TYPE_COMPRESSED_TCP;
				proto = PPP_PROTO_VJ_C;
			} else if (newskb->data[0] & SL_TYPE_UNCOMPRESSED_TCP) {
				newskb->data[0] = skb->data[0];
				proto = PPP_PROTO_VJ_UN;
			} else {
				/* unchanged, so transmit as normal ip */
				printk("wierd\n");
				b_dev_kfree_skb(newskb);
				break;
			}
			newskb->dev = skb->dev;
			skb->dev = NULL;
			b_dev_kfree_skb(skb);
			skb = newskb;
			skb_put(skb, newlen);
		}
#endif
		break;
	case ETH_P_IPX:
		proto = PPP_PROTO_IPX;
		if (!(b->lflags & BF_PASS_IPX))
			goto drop;
		break;
	default:
		pr_debug("%s: dropping unknown protocol 0x%x\n", b->name, skb->protocol);
		goto drop;
	}
	local_irq_restore(flags);
	__b_xmit(b, skb, proto);
	/* NOTE(review): "0x020302B" has an extra digit — every other guard in
	 * this file compares against 0x02032B; this one can never be true. */
#if LINUX_VERSION_CODE > 0x020302B
	netif_wake_queue(dev);
#endif
	return 0;
drop:
	b_dev_kfree_skb(skb);
	b->stats.tx_dropped++;
	/* we didn't transmit, but got the lock, so we must kick if
	 * anything's queued */
	local_irq_restore(flags);
	kick_tx_b(b);
#if LINUX_VERSION_CODE > 0x02032B
	netif_wake_queue(dev);
#endif
	return 0;
#if LINUX_VERSION_CODE < 0x02032B
tx_busy:
	local_irq_restore(flags);
	pr_debug("tx while busy\n");
	return -EBUSY;
#endif
}

/* PPP links never need header rebuilds; always succeeds. */
#if LINUX_VERSION_CODE < 0x20100
static int b_rebuild_header(void *buf, struct net_device *dev, unsigned long daddr, struct sk_buff *skb)
#else
static int b_rebuild_header(struct sk_buff *skb)
#endif
{
	return 0;
}

/* get_stats hook for the bundle net device. */
static struct net_device_stats *b_net_getstats(struct net_device *dev)
{
	return &((struct bundle *)dev->priv)->stats;
}

/* get_stats hook for a raw channel device (channel_t embeds the netdev
 * at offset 0, hence the direct cast — TODO confirm layout). */
static struct net_device_stats *ch_getstats(struct net_device *dev)
{
	return &((channel_t *)dev)->stats;
}

/* ifup: just open the transmit queue. */
static int b_net_open(struct net_device *dev)
{
#if LINUX_VERSION_CODE < 0x02032B
	dev->start = 1;
#else
	netif_start_queue(dev);
#endif
	return 0;
}

/* ifdown: stop the queue and discard every pending tx buffer. */
static int b_net_stop(struct net_device *dev)
{
	struct bundle *b = (struct bundle *)dev;
	struct sk_buff *skb;
	/* hmm, need to signal down event -- just drop all channels? */
#if LINUX_VERSION_CODE < 0x02032B
	dev->start = 0;
	set_bit(0, &b->dev.tbusy);
#else
	netif_stop_queue(&b->dev);
#endif
	if (b->tx_skb) {
		b_dev_kfree_skb(b->tx_skb);
		b->tx_skb = NULL;
	}
	if (b->frag_skb) {
		b_dev_kfree_skb(b->frag_skb);
		b->frag_skb = NULL;
	}
	while (NULL != (skb = skb_dequeue(&b->tx_q)))
		b_dev_kfree_skb(skb);
	wake_up_interruptible(&b->tx_wait);
	return 0;
}

/* we put our header onto the packet here: reserve hard_header_len bytes
 * that b_net_xmit pulls back off before real header construction. */
static int b_hard_header(struct sk_buff *skb, struct net_device *dev,
			 unsigned short type, void *daddr, void *saddr, unsigned len)
{
	skb_push(skb, dev->hard_header_len);
	return dev->hard_header_len;
}

/* net_device init hook: wire up all the bundle device callbacks. */
static int b_net_init(struct net_device *dev)
{
	int i;
	dev->addr_len = 0;
	dev->tx_queue_len = 8;
	dev_init_buffers(dev);
	dev->change_mtu = NULL;
	dev->hard_header = b_hard_header;
#if LINUX_VERSION_CODE < 0x20100
	dev->family = AF_INET;
	dev->pa_brdaddr = 0xffffffff;
	dev->pa_mask = 0xffffffff;
	dev->pa_alen = 4;
#endif
	dev->header_cache_update = NULL;
	dev->rebuild_header = b_rebuild_header;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	dev->type = ARPHRD_PPP;
	dev->hard_header_len = 8+20+40;	/* maximum length of header we will add in b_net_xmit */
	dev->addr_len = ETH_ALEN;
	/* NOTE(review): corrupted loop — likely was
	 * "for (i=0; i<ETH_ALEN; i++) dev->broadcast[i] = 0xff;" */
	for (i=0; ibroadcast[i] = 0xff;
	dev->get_stats = b_net_getstats;
	dev->open = b_net_open;
	dev->stop = b_net_stop;
	dev->hard_start_xmit = b_net_xmit;
	dev->do_ioctl = NULL;
	return 0;
}

static int last_dev_id;

/*
 * NOTE(review): this region is badly corrupted — a span between '<' and
 * '>' was deleted, merging the tail of alloc_bdev_id's search loop with
 * what appears to be the body of a separate free_bdev_id() function
 * ("if ((id >= 0) && (id < last_dev_id)) last_dev_id = id;").  Also the
 * first condition reads "id >= 0 || id < B_MAX_DEV", which is always
 * true for plausible ids — presumably "&&" in the original.  Recover
 * both functions from a pristine copy before use.
 */
int alloc_bdev_id(int id)
{
	int i;
	if (id >= 0 || id < B_MAX_DEV) {
		if (!b_devs[id])
			return id;
	}
	id = last_dev_id;
	for (i=0; i= 0) && (id < last_dev_id))
		last_dev_id = id;
}

/*
 * Allocate and register the net device for bundle index i (or reuse the
 * existing one). Returns the bundle, or NULL on slhc allocation failure.
 * Takes a module reference that free_bundle() releases.
 */
static struct bundle *alloc_bundle(int i)
{
	struct bundle *b;
	MOD_INC_USE_COUNT;
	b = kmalloc(sizeof(*b), GFP_KERNEL);
	if (!b)
		goto out;
	if ((i >= 0) && (i < B_MAX_DEV) && b_devs[i]) {
		/* already exists: hand back the existing bundle */
		kfree(b);
		MOD_DEC_USE_COUNT;
		return b_devs[i];
	}
	i = alloc_bdev_id(i);
	memset(b, 0, sizeof(*b));
	init_waitqueue_head(&b->tx_wait);
	skb_queue_head_init(&b->tx_q);
#if defined(CONFIG_SLHC_BUILTIN) || defined(CONFIG_SLHC_MODULE)
	b->slhc = slhc_init(16, 16);
	if (!b->slhc) {
		printk("slhc_init failed\n");
		kfree(b);
		return NULL;
	}
#endif
	if (b_devs[i])
		goto out;
	b->dev.priv = b;
	b->dev.init = b_net_init;
	b->m_seq = 0xffffff;
	sprintf(b->name, "aps%d", i);
	b->index = i;
#if LINUX_VERSION_CODE < 0x02032B
	b->dev.name = b->name;
#else
	strncpy(b->dev.name, b->name, IFNAMSIZ);
#endif
	if (0 == register_netdev(&b->dev)) {
		pr_debug("alloc_bundle succeeded(%s @ %p)\n", b->name, b);
		b_devs[i] = b;
		return b;
	}
	free_bdev_id(i);
out:
	if (b) {
#if defined(CONFIG_SLHC_BUILTIN) || defined(CONFIG_SLHC_MODULE)
		if (b->slhc)
			slhc_free(b->slhc);
#endif
		kfree(b);
	}
	MOD_DEC_USE_COUNT;
	return b_devs[i];
}

/* Tear down a bundle: detach all channels, unregister the netdev, free
 * the VJ state, and drop the module reference taken by alloc_bundle. */
static int free_bundle(struct bundle *b)
{
	int id = b->index;
	b_devs[id] = NULL;
	while (b->chan)
		leave_bundle(b->chan);
#if LINUX_VERSION_CODE < 0x02032B
	set_bit(0, &b->dev.tbusy);
#else
	netif_stop_queue(&b->dev);
#endif
	unregister_netdev(&b->dev);
#if defined(CONFIG_SLHC_BUILTIN) || defined(CONFIG_SLHC_MODULE)
	slhc_free(b->slhc);
#endif
	kfree(b);
	free_bdev_id(id);
	MOD_DEC_USE_COUNT;
	return 0;
}

/* returns true if left >= right, implemented by checking if left - right
 * is not a negative 24 bit signed number */
static inline int seq_ge(u32 left, u32 right)
{
	return !((left - right) & 0x00800000);
}

/* returns true if left <= right, implemented by checking if right - left
 * is not a negative 24 bit signed number */
static inline int seq_le(u32 left, u32 right)
{
	return !((right - left) & 0x00800000);
}

static inline int seq_lt(u32 left, u32 right)
{
	return !seq_ge(left, right);
}

static inline int seq_eq(u32 left, u32 right)
{
	return (left & 0x00ffffff) == (right & 0x00ffffff);
}

/* Look up a stashed fragment by sequence number (the 32-bit tag is kept
 * in the first 4 bytes of each queued skb). Returns NULL if absent. */
static inline struct sk_buff *find_seq(struct bundle *b, u32 seq)
{
	struct sk_buff *skb;
	for (skb=b->frags[seq & FRAG_HASH_MASK]; skb && !seq_eq(seq, *(u32 *)skb->data); skb=skb->next)
		;
	pr_debug("find_seq(%08x)=%p\n", seq, skb);
	return skb;
}

/* Unlink a fragment from the hash, strip its 4-byte tag, release the
 * call reference it pinned, and return it. Caller must know it exists. */
static struct sk_buff *yank_seq(struct bundle *b, u32 seq)
{
	struct sk_buff **skb_p, *skb;
	for (skb_p=&b->frags[seq & FRAG_HASH_MASK]; *skb_p && !seq_eq(seq, *(u32 *)(*skb_p)->data); skb_p=&(*skb_p)->next)
		;
#if 1
	if (!*skb_p)
		printk(KERN_ALERT "yank_seq: %08x not found!\n", seq);
#endif
	skb = *skb_p;
	*skb_p = skb->next;
	skb_pull(skb, 4);
	b->num_frags--;
	put_call((struct chan *)skb->dev);
	skb->dev = NULL;
	return skb;
}

/* Try to reassemble a fragment and be efficient about it. This code
 * makes a couple of implicite assumptions, most notably that when it
 * reads the sequence number out of a packet in the queue it just did a
 * find_seq on, that sequence number will be identical. Otherwise, gaps
 * in the queue may arise, leading to a NULL pointer deref in yank_seq.
 *		-ben */
static void try_to_reassem(struct bundle *b, u32 seq)
{
	struct sk_buff *skb, *tmp;
	u32 b_seq, e_seq;
	unsigned len_needed = 0;

	/* scan forward from 'seq' for the end-fragment (0x40000000 bit) */
	e_seq = seq;
	if (e_seq & 0x80000000)
		e_seq++;
	do {
		if (!(skb = find_seq(b, e_seq)))
			goto out;
		e_seq = *(u32 *)skb->data;
		if (e_seq & 0x80000000)
			goto bad_ml;
		len_needed += skb->len - 4;
		pr_debug("try_to_reassem:e found %08x\n", e_seq);
	} while (!(0x40000000 & e_seq++));
	e_seq--;

	/* scan backward from 'seq' for the begin-fragment (0x80000000 bit) */
	b_seq = seq;
	if (b_seq & 0x40000000)
		b_seq--;
	do {
		if (!(skb = find_seq(b, b_seq)))
			goto out;
		b_seq = *(u32 *)skb->data;
		if (b_seq & 0x40000000)
			goto bad_ml;
		len_needed += skb->len - 4;
		pr_debug("try_to_reassem:b found %08x\n", b_seq);
	} while (!(0x80000000 & b_seq--));
	b_seq++;
	//printk(KERN_DEBUG "%s: fragment complete from %08x -> %08x\n", b->name, b_seq, e_seq);
	/* we have a complete fragment */
	skb = yank_seq(b, b_seq);
	if (skb_tailroom(skb) < (len_needed - skb->len)) {
		/* first piece can't hold the rest: allocate a fresh buffer */
		tmp = skb;
		skb = dev_alloc_skb(len_needed);
		if (!skb) {
			printk(KERN_WARNING "try_to_reassem: out of memory for %u byte packet\n", len_needed);
			goto out;
		}
		skb->dev = NULL;
		pr_debug("try_to_reassem: got new skb\n");
#if LINUX_VERSION_CODE < 0x20100
		skb->free = 1;
#endif
		goto copy;
	}
	b_seq++;
	/* we now walk the frament from start+1 -> end, putting them into
	 * the new buffer. */
	do {
		pr_debug("try_to_reassem: getting %08x\n", b_seq);
		tmp = yank_seq(b, b_seq);
copy:
		memcpy(skb_put(skb, tmp->len), tmp->data, tmp->len);
		b_kfree_skb(tmp);
		b_seq ++;
	} while (seq_le(b_seq, e_seq))
		;
	demux_pkt(b, skb);
out:
	return;
bad_ml:
	printk(KERN_WARNING "%s/%s: bad multilink sequence %08x discarded\n",
	       b->name, ((struct chan *)skb->dev)->ch->device_name, *(u32 *)skb->data);
	skb = yank_seq(b, *(u32 *)skb->data);
	b_kfree_skb(skb);
}

/* Drop every stashed fragment with sequence number <= seq.
 * NOTE(review): loop header corrupted — a deleted '<...>' span merged the
 * outer hash-bucket loop with the inner walk; original likely read
 * "for (i=0; i<FRAG_HASH_SIZE; i++) { struct sk_buff **skb_p;
 *  for (skb_p = &b->frags[i]; *skb_p; ) { ... } }". */
static void discard_frags_le(struct bundle *b, u32 seq)
{
	unsigned i;
	if (!b->num_frags)
		return;
	for (i=0; ifrags[i]; *skb_p; ) {
			struct sk_buff *skb = *skb_p;
			if (seq_le(*(u32 *)skb->data, seq)) {
				*skb_p = skb->next;
				skb->next = NULL;
				put_call((struct chan *)skb->dev);
				skb->dev = NULL;
				b_kfree_skb(skb);
				b->num_frags--;
			} else
				skb_p = &skb->next;
		}
	}
}

/* Drop every stashed fragment.
 * NOTE(review): same '<...>' corruption as discard_frags_le — the outer
 * bucket loop header and inner initialization were fused. */
static void discard_all_frags(struct bundle *b)
{
	unsigned i;
	if (!b->num_frags)
		return;
	for (i=0; ifrags[i]; skb; skb=b->frags[i]) {
			b->frags[i] = skb->next;
			skb->next = NULL;
			put_call((struct chan *)skb->dev);
			skb->dev = NULL;
			b_kfree_skb(skb);
		}
	}
	b->num_frags = 0;
}

/* Recompute the bundle's minimum received sequence number M (RFC 1990
 * loss detection) from the per-channel values, then age out fragments
 * that can no longer be completed. */
static void recompute_m(struct bundle *b)
{
	struct chan *ch = b->chan;
	int valid = 0;
	do {
		if (ch->m_valid && (!valid || seq_lt(ch->m_seq, b->m_seq))) {
			valid = 1;
			b->m_seq = ch->m_seq;
		}
		ch = ch->next;
	} while (ch != b->chan);
	if (valid) {
		u32 seq = b->m_seq;
		struct sk_buff *skb;
		/* walk back from M; stop discarding at a fragment boundary */
		while ((skb = find_seq(b, seq))) {
			if (0x40000000 & *(u32 *)skb->data) {
				discard_frags_le(b, seq);
				return;
			}
			seq--;
			if (0x80000000 & *(u32 *)skb->data) {
				discard_frags_le(b, seq);
				return;
			}
		}
		discard_frags_le(b, b->m_seq);
	} else
		discard_all_frags(b);
}

/*
 * Accept one received multilink fragment (skb->dev carries the struct
 * chan reference). Normalizes short sequence numbers to 24 bits, stashes
 * the fragment in the hash keyed by sequence, attempts reassembly, and
 * recomputes M when the contributing channel defined it.
 */
static void mp_reassem(struct bundle *b, struct sk_buff *skb)
{
	struct chan *ch = (struct chan *)skb->dev;
	struct sk_buff *oskb;
	int redo_m;
	u32 seq;
	if (BF_SSN & b->lflags) {
		/* convert 12 bit short sequence number into 24 bit sequence number */
		seq = ntohs(*(u16 *)skb->data);
		seq = ((seq & 0xc000) << 16) | (seq & 0x0fff);
		if (!(seq & 0x0800) && (b->m_seq & 0x0800))
			seq |= (b->m_seq + 0x00001000) & 0x00fff000;	/* wrapped */
		else
			seq |= b->m_seq & 0x00fff000;
		pr_debug("%s: packet with short seq %04x -> %08x\n",
			 ch->ch->device_name, ntohs(*(u16 *)skb->data), seq);
		skb_push(skb, 2);	/* get extra space for internal storage of the sequence */
	} else {
		seq = ntohl(*(u32 *)skb->data);
		pr_debug("%s: packet with long seq %08x\n", ch->ch->device_name, seq);
	}
	/* do we need to recompute m? */
	redo_m = (!ch->m_valid || ch->m_seq == b->m_seq);
	ch->m_seq = seq;
	ch->m_valid = 1;
	if ((oskb=find_seq(b, seq)))
		goto discard;
	if ((seq & 0xc0000000) == 0xc0000000) {
		/* begin+end bits both set: unfragmented, deliver directly */
		skb_pull(skb, 4);
		put_call((struct chan *)skb->dev);
		skb->dev = NULL;
		demux_pkt(b, skb);
		goto out;
	}
	pr_debug("adding skb=%p seq=%08x\n", skb, seq);
	*(u32 *)skb->data = seq;
	skb->prev = NULL;
	skb->next = b->frags[seq & FRAG_HASH_MASK];
	b->frags[seq & FRAG_HASH_MASK] = skb;
	b->num_frags++;
	try_to_reassem(b, seq);
out:
	if (redo_m)
		recompute_m(b);
	return;
discard:
	printk(KERN_WARNING "%s: duplicate sequence number %08x ssn=%c\n",
	       ch->ch->device_name, seq, (BF_SSN & b->lflags) ? 'y' : 'n');
	ch->ch->stats.rx_errors++;
	put_call((struct chan *)skb->dev);
	skb->dev = NULL;
	b_kfree_skb(skb);
	/* it's best to be safe here and toss both, as otherwise the packet
	 * is likely to be bogus */
	oskb = yank_seq(b, seq);
	b_kfree_skb(oskb);
	if (redo_m)
		recompute_m(b);
}

/* Strip the optional 0xff03 address/control field and the 1- or 2-byte
 * PPP protocol field from skb. Returns the protocol, or -1 if the
 * packet is too short. */
static inline int pull_protocol(struct bundle *b, struct sk_buff *skb)
{
	int ret = -1;
	int protocol;
	if (htons(0xff03) == *(u16 *)skb->data) {
		if (!skb_pull(skb, 2)) {
			pr_drop("%s: skb_pull(2 -- ACF) failed\n", b->name);
			goto drop;
		}
	}
	protocol = *(u8 *)skb->data;
	if (!skb_pull(skb, 1)) {
		pr_drop("%s: skb_pull(1 -- proto a) failed\n", b->name);
		goto drop;
	}
	if (!(0x01 & protocol)) {
		/* even first byte => two-byte (uncompressed) protocol field */
		protocol <<= 8;
		protocol |= *(u8 *)skb->data;
		if (!skb_pull(skb, 1)) {
			pr_drop("%s: skb_pull(1 -- proto b) failed\n", b->name);
			goto drop;
		}
	}
	ret = protocol;
drop:
	return ret;
}

/* Hand a non-network packet back to the channel: via the channel's
 * ReInput hook when present, else queue it (bounded) for the daemon. */
static void bab_ReInput(struct chan *dev, struct sk_buff *skb)
{
	if (dev && dev->ch && dev->ch->ReInput) {
		dev->ch->ReInput(dev->ch, skb);
		return;
	}
	if (dev && skb_queue_len(&dev->rx_q) < 32) {
		skb_queue_tail(&dev->rx_q, skb);
		wake_up_interruptible(&dev->rx_wait);
		wake_up_interruptible(&dev->wait);
	} else {
		if (dev)
			dev->ch->stats.rx_dropped++;
		b_kfree_skb(skb);
	}
}

/*
 * Demultiplex a received PPP packet: MP fragments go to reassembly,
 * VJ-compressed and plain IP/IPX go up the network stack via netif_rx,
 * and everything else (LCP etc., protocol >= 0xc000) is re-wrapped and
 * passed back to the control daemon through bab_ReInput.
 */
static void demux_pkt(struct bundle *b, struct sk_buff *skb)
{
	struct chan *ch;
	int prot;
	u8 *p;
	pr_debug("demux_pkt\n");
	prot = pull_protocol(b, skb);
	if (prot < 0)
		goto drop;
	skb->protocol = prot;
	pr_debug("rx_pkt: protocol = %04x\n", skb->protocol);
	if (b && skb->protocol < 0xc000) {
		skb_reset_mac_header(skb);
		switch (skb->protocol) {
		case PPP_PROTO_MP:
			if (skb->dev) {
				/* skb->dev is NULL for mp reassembled packets */
				if (!(b->lflags & BF_PASS_ML)) {
					pr_drop("%s: MP: MP not enabled\n", b->name);
					goto drop;
				}
				mp_reassem(b, skb);
				return;
			}
			printk(KERN_ERR "%s: multilink header in multilink packet???? Dropping.\n", b->name);
			goto out;
#if defined(CONFIG_SLHC_BUILTIN) || defined(CONFIG_SLHC_MODULE)
		case PPP_PROTO_VJ_C: {
			int newlen;
			if (!(b->lflags & BF_VJC)) {
				pr_drop("%s: VJ_C: VJC not enabled\n", b->name);
				goto drop;
			}
			newlen = slhc_uncompress(b->slhc, skb->data, skb->len);
			if (newlen <= 0) {
				pr_drop("slhc err on %d\n", skb->len);
				goto drop;
			}
			if (newlen > skb->len)
				skb_put(skb, newlen - skb->len);
			else if (newlen < skb->len)
				skb_trim(skb, newlen);
			if (!(b->lflags & BF_PASS_IP))
				goto drop;
			skb->protocol = htons(ETH_P_IP);
			break;
		}
		case PPP_PROTO_VJ_UN:
			if (!(b->lflags & BF_VJC)) {
				pr_drop("%s: VJ_UN: VJC not enabled\n", b->name);
				goto drop;
			}
			if (slhc_remember(b->slhc, skb->data, skb->len) <= 0) {
				pr_drop("%s: slhc_remember failed\n", b->name);
				goto drop;
			}
			/* fallthrough into the IP case: a VJ_UN packet is
			 * delivered as plain IP — appears intentional */
#endif
		case PPP_PROTO_IP:
			if (!(b->lflags & BF_PASS_IP)) {
				pr_drop("%s: dropping ip packet\n", b->name);
				goto drop;
			}
			skb->protocol = htons(ETH_P_IP);
			break;
		case PPP_PROTO_IPX:
			if (!(b->lflags & BF_PASS_IPX)) {
				pr_drop("%s: dropping ipx packet\n", b->name);
				goto drop;
			}
			skb->protocol = htons(ETH_P_IPX);
			break;
		default:
			goto out;
		}
		pr_debug("rx_pkt: calling netif_rx len=%d, data=%02x %02x %02x %02x\n",
			 skb->len, skb->data[0], skb->data[1], skb->data[2], skb->data[3]);
		b->rx_bytes += skb->len;
		b->stats.rx_packets ++;
#if LINUX_VERSION_CODE >= 0x20100
		b->stats.rx_bytes += skb->len;
#endif
		if (skb->dev)
			put_call((struct chan *)skb->dev);
		skb->dev = &b->dev;
		netif_rx(skb);
		return;
	}
out:
	/* not for the stack: restore the protocol field and give the
	 * packet back to the daemon */
	p = skb_push(skb, 2);
	*p++ = skb->protocol >> 8;
	*p++ = skb->protocol;
	if (skb->dev) {
		ch = (struct chan *)skb->dev;
		put_call((struct chan *)skb->dev);
		skb->dev = NULL;
	} else if (b) {
		/* put a dummy multilink header on the packet for Babylon. */
		ch = b->chan;
		if (BF_SSN & b->lflags) {
			p = skb_push(skb, 4);
			p[2] = 0xc0 | ((b->dummy_seq >> 8) & 0x0f);
			p[3] = b->dummy_seq;
			b->dummy_seq = (b->dummy_seq + 1) & 0x0fff;
		} else {
			p = skb_push(skb, 6);
			p[2] = 0xc0;
			p[3] = b->dummy_seq >> 16;
			p[4] = b->dummy_seq >> 8;
			p[5] = b->dummy_seq;
			b->dummy_seq = (b->dummy_seq + 1) & 0x00ffffff;
		}
		p[0] = PPP_PROTO_MP >> 8;
		p[1] = PPP_PROTO_MP;
		if (!(b->lflags & BF_ACFC)) {
			p = skb_push(skb, 2);
			*p++ = 0xff;
			*p++ = 0x03;
		}
	} else
		ch = NULL;
	bab_ReInput(ch, skb);
	return;
drop:
	b->stats.rx_errors++;
	if (skb->dev)
		put_call((struct chan *)skb->dev);
	skb->dev = NULL;
	b_kfree_skb(skb);
}

/* Receive-path entry for one skb: route to the bundle demuxer when the
 * channel is in PPP mode and bundled, else back to the channel queue. */
static inline void rx_skb(struct sk_buff *skb)
{
	struct chan *ch = (struct chan *)skb->dev;
	pr_debug("input_bh: len=%d, data=\n"
		 " %02x %02x %02x %02x %02x %02x %02x %02x\n"
		 " %02x %02x %02x %02x %02x %02x %02x %02x\n"
		 , skb->len,
		 skb->data[ 0], skb->data[ 1], skb->data[ 2], skb->data[ 3],
		 skb->data[ 4], skb->data[ 5], skb->data[ 6], skb->data[ 7],
		 skb->data[ 8], skb->data[ 9], skb->data[10], skb->data[11],
		 skb->data[12], skb->data[13], skb->data[14], skb->data[15]
		 );
	if ((ch->lflags & BF_PPP) && ch->bundle) {
		demux_pkt(ch->bundle, skb);
		return;
	}
	pr_debug("input_bh(%s, %p): ppp !active, putting in channel queue\n",
		 ch->ch->device_name, skb);
	skb->dev = NULL;
	bab_ReInput(ch, skb);
	put_call(ch);
}

/*
 * Allocate and initialize the struct chan ("call") for a channel and
 * assign it a global call id. Returns 0 or -ENOMEM.
 * NOTE(review): the callid-assignment loop is corrupted — a deleted
 * '<...>' span removed its header/middle; original presumably iterated
 * over calls[] looking for a free slot.
 */
static int setup_call(channel_t *ch)
{
	struct chan *call;
	int i;
	call = kmem_cache_alloc(chan_cachep, GFP_ATOMIC);
	if (!call) {
		printk("Erk\n");
		return -ENOMEM;
	}
	memset(call, 0, sizeof(*call));
	call->callid = -1;
	for (i=0; icallid = callid;
			callid = (callid + 1) % B_MAX_DEV;
			break;
		}
		callid = (callid + 1) % B_MAX_DEV;
	}
	if (call->callid == -1)
		printk("no callid\n");
	skb_queue_head_init(&call->rx_q);
	skb_queue_head_init(&call->tx_q);
	init_waitqueue_head(&call->rx_wait);
	init_waitqueue_head(&call->tx_wait);
	init_waitqueue_head(&call->wait);
	call->ch = ch;
	call->use_count = 1;	/* use count of 1 is the channel_t->link */
	MOD_INC_USE_COUNT;
	ch->link = call;
	call->idx = -1;
	return 0;
}

/* Look up (creating on demand) the call attached to channel minor idx.
 * On failure returns NULL and sets *retp to -ENODEV/-EAGAIN. */
static struct chan *get_call(unsigned idx, int *retp)
{
	struct chan *call = NULL;
	channel_t *ch;
	local_irq_disable();
	if (idx >= B_MAX_DEV || !(ch = channels[idx])) {
		*retp = -ENODEV;
		goto out;
	}
	if (!ch->link)
		setup_call(ch);
	call = ch->link;
	if (call) {
		call->idx = idx;
	} else
		*retp = -EAGAIN;
out:
	local_irq_enable();
	return call;
}

/* Insert a connected channel into a bundle's circular channel list and
 * kick transmission. Returns 0, -EBUSY, or -EIO. */
static int join_bundle(struct chan *ch, struct bundle *b)
{
	int ret;
	pr_debug("join_bundle(%s, %s)\n", ch->ch->device_name, b->name);
	local_irq_disable();
	ret = -EBUSY;
	if (ch->bundle)
		goto out;
	ret = -EIO;
	if (CS_CONNECTED != ch->ch->state)
		goto out;
	ret = 0;
	ch->bundle = b;
	if (b->chan) {
		ch->next = b->chan->next;
		b->chan->next = ch;
	} else {
		ch->next = ch;	/* first member: one-element ring */
		b->chan = ch;
	}
	b->num_chan++;
	kick_tx(ch);
out:
	local_irq_enable();
	return ret;
}

/* Remove a channel from its bundle's ring; when the last channel leaves,
 * flush reassembly state so the bundle is clean for reuse. */
static void leave_bundle(struct chan *ch)
{
	struct bundle *b;
	struct chan **prevp;
	unsigned long flags;
	pr_debug("leave_bundle(%s)\n", ch->ch->device_name);
	local_irq_save(flags);
	local_irq_disable();
	if (!(b=ch->bundle))
		goto out;
	prevp = &b->chan->next;
	do {
		if (*prevp == ch)
			break;
		prevp = &(*prevp)->next;
	} while (prevp != &b->chan->next);
	if (*prevp == ch)
		*prevp = ch->next;
	if (b->chan == ch)
		b->chan = (ch->next == ch) ? NULL : ch->next;
	ch->next = NULL;
	ch->bundle = NULL;
	b->num_chan--;
	if (!b->chan) {
		/* bundle is now idle, need to tidy up for next use */
		discard_all_frags(b);
		b->m_seq = 0xffffff;
	}
	wake_up_interruptible(&b->tx_wait);
out:
	local_irq_restore(flags);
}

/*
 * Drop one reference on a call; on the last reference, hang up the
 * channel if the user still had it active, detach from the bundle, purge
 * both queues, release the call id and any privately allocated bundle,
 * and free the structure.
 */
static void put_call(struct chan *call)
{
	unsigned long flags;
	local_irq_save(flags);
	local_irq_disable();
	if (!--call->use_count) {
		struct sk_buff *skb;
		channel_t *ch;
		ch = call->ch;
		if (ch) {
			call->ch = NULL;
			ch->link = NULL;
			/* only do the hangup after dropping the call, as
			 * hangup will cause Down to be called and we can't
			 * let it see the call in a half-closed state. */
			if (call->user_active) {
				if (ch->Hangup(ch)) {
					printk(KERN_DEBUG "bab: can't abort call -- memory leak!\n");
					goto out;
				}
				call->user_active = 0;
			}
		}
		if (call->bundle)
			leave_bundle(call);
		while (NULL != (skb = skb_dequeue(&call->rx_q)))
			b_kfree_skb(skb);
		while (NULL != (skb = skb_dequeue(&call->tx_q)))
			b_dev_kfree_skb(skb);
		if (call->callid >= 0 && call->callid < B_MAX_DEV && calls[call->callid] == call) {
			calls[call->callid] = NULL;
			if (call->callid < callid)
				callid = call->callid;	/* reuse low ids first */
		}
		if (call->allocd_bundle) {
			free_bundle(call->allocd_bundle);
			call->allocd_bundle = NULL;
		}
		kmem_cache_free(chan_cachep, call);
out:
		MOD_DEC_USE_COUNT;
	}
	local_irq_restore(flags);
}

/*
 * forcibly drops a call
 */
static void drop_call(struct chan *call)
{
	leave_bundle(call);
	call->user_active = 0;
	call->ch->link = NULL;
	call->ch = NULL;
	wake_up_interruptible(&call->rx_wait);
	wake_up_interruptible(&call->tx_wait);
	wake_up_interruptible(&call->wait);
	put_call(call);
}

/* Dial out on channel minor idx; blocks for call completion unless the
 * file was opened O_NONBLOCK. Returns 0, a driver status, or -errno. */
static int b_dial(unsigned idx, struct file *filp, const char *number)
{
	DECLARE_WAITQUEUE(wait, current);
	struct chan *dev;
	int ret = 0;
	pr_debug("b_dial(%d, %s)\n", idx, number);
	local_irq_disable();
	dev = get_call(idx, &ret);
	if (!dev) {
		local_irq_enable();
		return ret;
	}
	if (dev->user_active || dev->dialing) {
		local_irq_enable();
		put_call(dev);
		return -EBUSY;
	}
	if (filp->private_data)
		put_call(filp->private_data);
	filp->private_data = dev;
	dev->dialing = 1;
	dev->user_status = 0;
	dev->user_active = 1;
	if (!(O_NONBLOCK & filp->f_flags)) {
		/* register on the wait queue before connecting to avoid
		 * missing the completion wakeup */
		current->state = TASK_INTERRUPTIBLE;
		add_wait_queue(&dev->wait, &wait);
	}
	ret = dev->ch->Connect(dev->ch, number, dev->ch->callType);
	if (ret)
		dev->user_active = 0;
	local_irq_enable();
	if (!(O_NONBLOCK & filp->f_flags)) {
		if (!ret) {
			schedule();
			if (signal_pending(current))
				ret = -ERESTARTSYS;
			else
				ret = dev->user_status;
		}
		current->state = TASK_RUNNING;
		remove_wait_queue(&dev->wait, &wait);
	}
	return ret;
}

/* ioctl helper: attach a channel's call to bundle index arg. */
int ch_JoinBundle(channel_t *ch, unsigned long arg)
{
	struct chan *dev = ch->link;
	if (!dev)
		return -EPIPE;
	if (dev->bundle)
		return -EBUSY;
	if (arg >= B_MAX_DEV || !b_devs[arg])
		return -ENOENT;
	return join_bundle(dev, b_devs[arg]);
}

/*
 * Called when a program makes an ioctl call against one of our
 * file nodes
 */
static int b_ioctl(struct inode *ino, struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct chan *dev = filp->private_data;
	return ch_ioctl(ino, filp, dev, cmd, arg);
}

/*
 * The big ioctl dispatcher shared by file nodes and channels. 'dev' may
 * be NULL for commands that operate purely on the minor number; commands
 * not handled here fall through to the per-bundle and then per-channel
 * ioctl hooks. NOTE(review): BIOCJOINBUNDLE, BIOCCREATEBUNDLE and the
 * fall-through path dereference 'dev' without a NULL check, unlike the
 * neighbouring commands — confirm callers guarantee dev != NULL there.
 */
int ch_ioctl(struct inode *ino, struct file *filp, struct chan *dev, unsigned int cmd, unsigned long arg)
{
	struct bundle *b;
	channel_t *ch;
	int i;
	pr_debug("b_ioctl(%p, %08x, %08lx)\n", dev, cmd, arg);
	if (BIOCGETCHNAME == cmd) {
		if (!dev)
			return -ENODEV;
		if (copy_to_user((void *)arg, dev->ch->device_name, 1 + strlen(dev->ch->device_name)))
			return -EFAULT;
		return 0;
	} else if (BIOCDIAL == cmd) {
		char tmp[32];
		if (dev)
			return -EBUSY;
		if (copy_from_user(tmp, (void *)arg, 31))
			return -EFAULT;
		tmp[31] = 0;
		return b_dial(MINOR(ino->i_rdev), filp, tmp);
	} else if (BIOCANSWER == cmd) {
		if (!dev) {
			int ret;
			dev = get_call(MINOR(ino->i_rdev), &ret);
			if (!dev)
				return ret;
			filp->private_data = dev;
		}
		dev->user_calltypes = arg;
		return 0;
	} else if (BIOCGETCALLID == cmd) {
		if (!dev)
			return -ENODEV;
		return dev->callid;
	} else if (BIOCGETDEVID == cmd) {
		if (!dev)
			return -ENODEV;
		b = dev->bundle;
		if (!b) {
			if (dev->allocd_bundle)
				return dev->allocd_bundle->index;
			return -EUNATCH;
		}
		return b->index;
	} else if (BIOCJOINBUNDLE == cmd) {
		return ch_JoinBundle(dev->ch, arg);
	} else if (BIOCLEAVEBUNDLE == cmd) {
		if (!dev)
			return -ENODEV;
		if (!dev->bundle)
			return -EUNATCH;
		leave_bundle(dev);
		return 0;
	} else if (BIOC_SETLCFL == cmd) {
		if (!dev)
			return -ENODEV;
		dev->lflags = arg;
		if (dev->bundle) {
			local_irq_disable();
			kick_tx(dev);
			local_irq_enable();
		}
		return 0;
	} else if (BIOC_GETLCFL == cmd) {
		if (!dev)
			return -ENODEV;
		i = dev->lflags;
		if (copy_to_user((void *)arg, &i, sizeof(i)))
			return -EFAULT;
		return 0;
	} else if (BIOC_SETRCFL == cmd) {
		if (!dev)
			return -ENODEV;
		dev->rflags = arg;
		if (dev->bundle) {
			local_irq_disable();
			kick_tx(dev);
			local_irq_enable();
		}
		return 0;
	} else if (BIOC_GETRCFL == cmd) {
		if (!dev)
			return -ENODEV;
		i = dev->rflags;
		if (copy_to_user((void *)arg, &i, sizeof(i)))
			return -EFAULT;
		return 0;
	} else if (BIOCHANGUP == cmd) {
		if (dev) {
			ch = dev->ch;
			i = 0;
			local_irq_disable();
			if (dev->ch && dev->user_active) {
				dev->user_active = 0;
				i = ch->Hangup(dev->ch);
			}
			local_irq_enable();
			return i;
		}
		if (!ino || ((MINOR(ino->i_rdev) >= B_MAX_DEV) || !(ch = channels[MINOR(ino->i_rdev)])))
			return -ENODEV;
		local_irq_disable();
		i = ch->Hangup(ch);
		local_irq_enable();
		return i;
	} else if (BIOC_SETCALLTYPE == cmd) {
		if (!ino || ((MINOR(ino->i_rdev) >= B_MAX_DEV) || !(ch = channels[MINOR(ino->i_rdev)])))
			return -ENODEV;
		ch->callType = arg;
		return 0;
	} else if (BIOC_GETCALLTYPE == cmd) {
		if (!ino || ((MINOR(ino->i_rdev) >= B_MAX_DEV) || !(ch = channels[MINOR(ino->i_rdev)])))
			return -ENODEV;
		if (copy_to_user((void *)arg, &ch->callType, sizeof(int)))
			return -EFAULT;
		return 0;
	} else if (BIOC_GETCAUSECODE == cmd) {
		if (!dev)
			return -ENODEV;
		if (copy_to_user((void *)arg, &dev->user_status, sizeof(int)))
			return -EFAULT;
		return 0;
	} else if (BIOC_GET_CALLING_NUMBER == cmd) {
		if (!ino || ((MINOR(ino->i_rdev) >= B_MAX_DEV) || !(ch = channels[MINOR(ino->i_rdev)])))
			return -ENODEV;
		printk("CallerNumber: %s\n", ch->CallerNumber);
		if (copy_to_user((void *)arg, &ch->CallerNumber, sizeof(ch->CallerNumber)))
			return -EFAULT;
		return 0;
	} else if (BIOC_GET_CALLED_NUMBER == cmd) {
		if (!ino || ((MINOR(ino->i_rdev) >= B_MAX_DEV) || !(ch = channels[MINOR(ino->i_rdev)])))
			return -ENODEV;
		printk("CalledNumber: %s\n", ch->CalledNumber);
		if (copy_to_user((void *)arg, &ch->CalledNumber, sizeof(ch->CalledNumber)))
			return -EFAULT;
		return 0;
	} else if (BIOC_GET_MAX_MRU == cmd) {
		if (!ino || ((MINOR(ino->i_rdev) >= B_MAX_DEV) || !(ch = channels[MINOR(ino->i_rdev)])))
			return -ENODEV;
		i = ch->mru;
		if (copy_to_user((void *)arg, &i, sizeof(int)))
			return -EFAULT;
		return 0;
	} else if (BIOCCREATEBUNDLE == cmd) {
		if (dev->allocd_bundle)
			return -EINVAL;
		dev->allocd_bundle = alloc_bundle(arg);
		if (!dev->allocd_bundle)
			return -ENOMEM;
		return 0;
	} else if (BIOCDESTROYBUNDLE == cmd) {
		struct bundle *b = dev->allocd_bundle;
		if (!b)
			return -EINVAL;
		dev->allocd_bundle = NULL;
		free_bundle(b);
		return 0;
	}
	/* unrecognized: try the private bundle, then the channel driver */
	if (dev->allocd_bundle) {
		int ret = bdev_ioctl(dev->allocd_bundle, cmd, arg);
		if (ret != -ENOSYS)
			return ret;
	}
	if (!ino)
		return -ENOSYS;
	if (((MINOR(ino->i_rdev) >= B_MAX_DEV) || !(ch = channels[MINOR(ino->i_rdev)])))
		return -ENOSYS;
	return ch->ioctl(ch, cmd, arg);
}

/* Block the opener until a call arrives on channel minor idx (or a
 * signal); on failure the call reference is released again. */
static int b_wait_for_call(struct file *filp, unsigned idx)
{
	DECLARE_WAITQUEUE(wait, current);
	struct chan *call;
	int ret = 0;
	pr_debug("b_wait_for_call(%u)\n", idx);
	call = filp->private_data = get_call(idx, &ret);
	if (!filp->private_data)
		return ret;
	/*
	 * This is the race free way of doing things.
	 */
	call->user_calltypes = ~0;
	current->state = TASK_INTERRUPTIBLE;
	add_wait_queue(&call->wait, &wait);
	if (call->user_calltypes && !call->user_active) {
		schedule();
		if (signal_pending(current))
			ret = -ERESTARTSYS;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&call->wait, &wait);
	if (ret) {
		put_call(call);
		filp->private_data = NULL;
	}
	return ret;
}

/*
 * Called when a program opens one of our device nodes
 */
static int b_open(struct inode *ino, struct file *filp)
{
	int ret = 0;
	channel_t *ch;
	pr_debug("b_open(%p) bab%d\n", filp, MINOR(ino->i_rdev));
	filp->private_data = NULL;
	if (((MINOR(ino->i_rdev) >= B_MAX_DEV) || !(ch = channels[MINOR(ino->i_rdev)])))
		return -ENODEV;
	MOD_INC_USE_COUNT;
	ch->use(ch);
	/* non blocking mode means that the user just wants the fd to do
	 * some funky ioctls right away. Worry about calls later on... */
	if (!(O_NONBLOCK & filp->f_flags))
		ret = b_wait_for_call(filp, MINOR(ino->i_rdev));
	if (ret) {
		ch->unuse(ch);
		MOD_DEC_USE_COUNT;
	}
	pr_debug("b_open(%p)=%d %p complete.\n", filp, ret, filp->private_data);
	return ret;
}

/*
 * Called when a program closes one of our device nodes
 */
#if LINUX_VERSION_CODE < 0x20100
static void b_release(struct inode *ino, struct file *filp)
#else
static int b_release(struct inode *ino, struct file *filp)
#endif
{
	channel_t *ch;
	struct chan *dev;
	pr_debug("b_release(%p) %p\n", filp, filp->private_data);
	dev = filp->private_data;
	if (!dev)
		goto out;
	filp->private_data = NULL;
	local_irq_disable();
	/* hang up the call if this is the last user
	 * -- count is 2 'cause this filp still uses it as well as the call... */
#if 0
	if (1 == dev->use_count) {
		dev->user_calltypes = 0;
		dev->lflags = 0;
		dev->rflags = 0;
	}
#endif
	put_call(dev);
	local_irq_enable();
out:
	ch = channels[MINOR(ino->i_rdev)];
	ch->unuse(ch);
#if LINUX_VERSION_CODE >= 0x20100
	return 0;
#endif
}

#if LINUX_VERSION_CODE < 0x20100
/* 2.0-style select: readable when rx_q is non-empty or the channel is
 * gone; writable when connected with tx queue space. */
static int b_select(struct inode *ino, struct file *filp, int sel_type, select_table *wait)
{
	struct chan *dev = filp->private_data;
	int ret = 0;
	pr_debug("b_select: dev=%p\n", dev);
	if (!dev)
		return 0;
	if (SEL_IN == sel_type) {
		ret = !dev->ch || !skb_queue_empty(&dev->rx_q);
		if (!ret)
			select_wait(&dev->rx_wait, wait);
	} else if (SEL_OUT == sel_type) {
		unsigned long flags;
		local_irq_save(flags);
		local_irq_disable();
		ret = !dev->ch || (dev->ch && CS_CONNECTED == dev->ch->state && skb_queue_len(&dev->tx_q) < CH_TX_Q_LEN);
		local_irq_restore(flags);
		if (!ret)
			select_wait(&dev->tx_wait, wait);
	}
	return ret;
}
#else
/* 2.2+ poll hook; function continues beyond the end of this excerpt. */
static unsigned int b_poll(struct file *filp, poll_table *wait)
{
	struct chan *dev = filp->private_data;
	unsigned long flags;
	unsigned int mask;
	pr_debug("b_poll: dev=%p\n", dev);
	if (!dev)
		return 0;
	poll_wait(filp, &dev->wait, wait);
	mask = 0;
	if (!dev->ch || !skb_queue_empty(&dev->rx_q))
		mask |= POLLIN | POLLRDNORM;
	local_irq_save(flags);
local_irq_disable(); /* irq might pull out ch */ if (!dev->ch || (dev->ch && CS_CONNECTED == dev->ch->state && skb_queue_len(&dev->tx_q) < CH_TX_Q_LEN)) mask |= POLLOUT | POLLWRNORM; local_irq_restore(flags); return mask; } #endif #if LINUX_VERSION_CODE < 0x20100 static int b_read(struct inode *ino, struct file *filp, char *buf, int len) #else static ssize_t b_read(struct file *filp, char __user *buf, size_t len, loff_t *off) #endif { DECLARE_WAITQUEUE(wait, current); struct chan *dev = filp->private_data; struct sk_buff *skb = NULL; int ret; if (!dev) return -ENODEV; ret = -EPIPE; current->state = TASK_INTERRUPTIBLE; add_wait_queue(&dev->rx_wait, &wait); while (dev->user_active && NULL == (skb = skb_dequeue(&dev->rx_q)) && dev->ch) { ret = -EAGAIN; if (O_NONBLOCK & filp->f_flags) break; schedule(); ret = -ERESTARTSYS; if (signal_pending(current)) break; ret = -EPIPE; } current->state = TASK_RUNNING; remove_wait_queue(&dev->wait, &wait); if (skb) { if (skb->len < len) len = skb->len; if (copy_to_user(buf, skb->data, len)) len = -EFAULT; b_kfree_skb(skb); } else len = 0; return len; } #if LINUX_VERSION_CODE < 0x20100 static int b_write(struct inode *ino, struct file *filp, const char *buf, int len) #else static ssize_t b_write(struct file *filp, const char __user *buf, size_t len, loff_t *off) #endif { DECLARE_WAITQUEUE(wait, current); struct sk_buff *skb = NULL; struct chan *dev; char *tmpbuf; int ret; ret = -EINVAL; if (len <= 0 || len > 8000) goto out; skb = alloc_skb(len, GFP_KERNEL); ret = -ENOMEM; if (!skb) goto out; #if LINUX_VERSION_CODE < 0x20100 skb->free = FREE_READ; #endif dev = filp->private_data; ret = -ENODEV; if (!dev) goto out; tmpbuf = skb_put(skb, len); ret = -EFAULT; if (copy_from_user(tmpbuf, buf, len)) goto out; ret = -EPIPE; current->state = TASK_INTERRUPTIBLE; add_wait_queue(&dev->tx_wait, &wait); local_irq_disable(); while (dev->ch && dev->user_active && (CS_CONNECTED == dev->ch->state) && skb_queue_len(&dev->tx_q) >= CH_TX_Q_LEN) { 
local_irq_enable(); if (O_NONBLOCK & filp->f_flags) { ret = -EAGAIN; break; } pr_debug("%s: write: sleeping len=%d\n", dev->ch->device_name, skb_queue_len(&dev->tx_q)); schedule(); ret = -ERESTARTSYS; if (signal_pending(current)) break; ret = -EPIPE; local_irq_disable(); } if (-ERESTARTSYS != ret && dev->ch && dev->user_active && (CS_CONNECTED == dev->ch->state) && (skb_queue_len(&dev->tx_q) < CH_TX_Q_LEN)) { pr_debug("%s: write: putting packet on queue\n", dev->ch->device_name); skb_queue_tail(&dev->tx_q, skb); kick_tx(dev); ret = 0; } local_irq_enable(); current->state = TASK_RUNNING; remove_wait_queue(&dev->tx_wait, &wait); if (-EBUSY == ret) ret = -EAGAIN; out: if (ret && skb) b_dev_kfree_skb(skb); return ret ? ret : len; } static struct file_operations b_fops = { #if LINUX_VERSION_CODE >= 0x02032B .owner = THIS_MODULE, #endif .read = b_read, .write = b_write, #if LINUX_VERSION_CODE < 0x20100 .select = b_select, #else .poll = b_poll, #endif .ioctl = b_ioctl, .open = b_open, .release = b_release, }; /* * Called when a program opens one of our device nodes */ static int bppp_open(struct inode *ino, struct file *filp) { struct bundle *b; filp->private_data = NULL; b = alloc_bundle(-1); if (!b) return -ENOMEM; b->use++; filp->private_data = b; return 0; } static int bppp_release(struct inode *ino, struct file *filp) { struct bundle *b = filp->private_data; if (b) free_bundle(b); return 0; } static int bdev_ioctl(struct bundle *b, unsigned int cmd, unsigned long arg) { int i; if (BIOC_SETLBFL == cmd) { b->lflags = arg; return 0; } else if (BIOC_GETLBFL == cmd) { i = b->lflags; if (copy_to_user((void *)arg, &i, sizeof(int))) return -EFAULT; return 0; } else if (BIOC_SETRBFL == cmd) { b->rflags = arg; return 0; } else if (BIOC_GETRBFL == cmd) { i = b->rflags; if (copy_to_user((void *)arg, &i, sizeof(int))) return -EFAULT; return 0; } else if (BIOC_GETBSTATS == cmd) { struct bdev_stats stats; memset(&stats, 0, sizeof(stats)); stats.tx_bytes = b->tx_bytes; 
stats.rx_bytes = b->rx_bytes; if (copy_to_user((void *)arg, &stats, sizeof(stats))) return -EFAULT; return 0; } else if (BIOCGETDEVID == cmd) { return b->index; } return -ENOSYS; } static int bppp_ioctl(struct inode *ino, struct file *filp, unsigned int cmd, unsigned long arg) { struct bundle *b = filp->private_data; return bdev_ioctl(b, cmd, arg); } #if LINUX_VERSION_CODE < 0x20100 static int bppp_write(struct inode *ino, struct file *filp, const char *buf, int len) #else static ssize_t bppp_write(struct file *filp, const char *buf, size_t len, loff_t *off) #endif { DECLARE_WAITQUEUE(wait, current); struct sk_buff *skb = NULL; struct bundle *b; char *tmpbuf; int ret, proto; ret = -EINVAL; if (len <= 0 || len > 8000) goto out; b = filp->private_data; ret = -ENODEV; if (!b) goto out; skb = alloc_skb(len + 8 + b->dev.hard_header_len, GFP_KERNEL); ret = -ENOMEM; if (!skb) goto out; skb_reserve(skb, 8 + b->dev.hard_header_len); #if LINUX_VERSION_CODE < 0x20100 skb->free = FREE_READ; #endif tmpbuf = skb_put(skb, len); ret = -EFAULT; if (copy_from_user(tmpbuf, buf, len)) goto out; ret = -EINVAL; proto = pull_protocol(b, skb); if (proto < 0) goto out; skb->protocol = proto; ret = -EPIPE; current->state = TASK_INTERRUPTIBLE; add_wait_queue(&b->tx_wait, &wait); local_irq_disable(); while (b->chan && skb_queue_len(&b->tx_q) >= BPPP_TX_Q_LEN) { local_irq_enable(); if (O_NONBLOCK & filp->f_flags) { ret = -EAGAIN; break; } schedule(); ret = -ERESTARTSYS; if (signal_pending(current)) break; ret = -EPIPE; local_irq_disable(); } if (-ERESTARTSYS != ret && b->chan && skb_queue_len(&b->tx_q) < BPPP_TX_Q_LEN) { pr_debug("%s: write: putting packet on queue\n", b->name); skb_queue_tail(&b->tx_q, skb); /* kick_tx_b requires the transmit lock */ local_irq_enable(); #if LINUX_VERSION_CODE < 0x02032B if (!test_and_set_bit(0, &b->dev.tbusy)) #else if (!netif_queue_stopped(&b->dev)) #endif kick_tx_b(b); ret = 0; } else local_irq_enable(); current->state = TASK_RUNNING; 
remove_wait_queue(&b->tx_wait, &wait); if (-EBUSY == ret) ret = -EAGAIN; out: if (ret && skb) b_dev_kfree_skb(skb); return ret ? ret : len; } static struct file_operations bppp_fops = { #if LINUX_VERSION_CODE >= 0x02032B .owner = THIS_MODULE, #endif .write = bppp_write, #if LINUX_VERSION_CODE < 0x20100 .select = b_select, #else .poll = b_poll, #endif .ioctl = bppp_ioctl, .open = bppp_open, .release = bppp_release, }; void __ch_input(channel_t *ch, struct sk_buff *skb) { pr_debug("b_Input(%s, %p len=%d)\n", ch->device_name, skb, skb->len); #if LINUX_VERSION_CODE < 0x20100 skb->free = 1; #endif if (!ch->link) { ch->stats.rx_dropped++; skb->dev = NULL; b_kfree_skb(skb); return; } skb->dev = ch->link; if (!((struct chan *)ch->link)->use_count++) MOD_INC_USE_COUNT; pr_debug("__ch_input: dev=%p\n", ch->link); rx_skb(skb); return; } void ch_Input(channel_t *ch, struct sk_buff *skb) { #if LINUX_VERSION_CODE < 0x20100 skb->free = 1; #endif __ch_input(ch, skb); #if 0 skb->mac.raw = skb->data; skb->dev = &ch->ndev; skb->protocol = ETH_P_PPP; netif_rx(skb); #endif } static int b_input(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) , struct net_device *orig_dev #endif ) { channel_t *ch = (channel_t *)dev; __ch_input(ch, skb); return 0; } static void b_Open(channel_t *ch) { pr_debug("b_Open(%s)\n", ch->device_name); } static void b_Close(channel_t *ch) { pr_debug("b_Close(%s)\n", ch->device_name); } static void b_Up(channel_t *ch) { struct chan *dev; pr_debug("b_Up(%s)\n", ch->device_name); if (!ch->link) setup_call(ch); if (!(dev = ch->link)) return; dev->user_active = 1; dev->dialing = 0; wake_up_interruptible(&dev->wait); wake_up_interruptible(&dev->tx_wait); kick_tx(dev); } static void b_Down(channel_t *ch) { struct chan *dev = ch->link; pr_debug("b_Down(%s)\n", ch->device_name); if (dev) { struct sk_buff *skb; while (NULL != (skb = skb_dequeue(&dev->tx_q))) b_kfree_skb(skb); dev->user_active = 0; 
/* Continuation of b_Down(), then b_ConnectComplete (call setup result
 * callback), b_OutputComplete (tx-done callback), RegisterChannel /
 * UnregisterChannel (channel driver registration), the DeviceClass stubs,
 * and the /proc read handlers for channels and interfaces.
 *
 * NOTE(review): this region is corrupted — text between '<' and '>' was
 * eaten by whatever extracted this file (e.g. "for (i=0; idevice_name)"
 * in RegisterChannel and "for (i=0; ilink;" in proc_channel_getinfo are
 * missing their loop bounds and bodies).  Recover the lost statements
 * from revision 1.13 of kern.c in CVS before attempting any rewrite. */
wake_up_interruptible(&dev->rx_wait); wake_up_interruptible(&dev->tx_wait); wake_up_interruptible(&dev->wait); if (dev->allocd_bundle) { free_bundle(dev->allocd_bundle); dev->allocd_bundle = NULL; } drop_call(dev); } } static void b_ConnectComplete(channel_t *ch, int cause) { struct chan *dev = ch->link; pr_debug("b_ConnectComplete(%s, 0x%2x)\n", ch->device_name, cause); if (!dev) { pr_debug("b_ConnectComplete(%s, 0x%2x): No device.\n", ch->device_name, cause); if (!cause) { // dev->user_active = 0; ch->Hangup(ch); } return; } if (!cause) /* implicite Up */ dev->user_active = 1; else dev->user_active = 0; dev->dialing = 0; dev->user_status = cause; wake_up_interruptible(&dev->tx_wait); wake_up_interruptible(&dev->wait); if (cause) { wake_up_interruptible(&dev->rx_wait); if (!dev->use_count++) MOD_INC_USE_COUNT; drop_call(dev); } } static void b_OutputComplete(channel_t *ch) { struct chan *dev = ch->link; pr_debug("%s: b_OutputComplete\n", ch->device_name); if (!dev) return; if (ch->state == CS_CONNECTED) kick_tx(dev); } int RegisterChannel(channel_t *ch) { static unsigned start_index; unsigned i, j; int ret; MOD_INC_USE_COUNT; ch->link = NULL; j = start_index; for (i=0; idevice_name); MOD_DEC_USE_COUNT; return -ENOMEM; } start_index = (j + 1) % B_MAX_DEV; ch->channels_index = j; ch->Open = b_Open; ch->Close = b_Close; ch->Up = b_Up; ch->Down = b_Down; ch->OutputComplete = b_OutputComplete; ch->ConnectComplete = b_ConnectComplete; #if LINUX_VERSION_CODE < 0x02032B ch->ndev.name = ch->device_name; #else strncpy(ch->ndev.name, ch->device_name, IFNAMSIZ); #endif ch->ndev.get_stats = ch_getstats; ch->ndev.type = ARPHRD_PPP; ret = 0; //register_netdev(&ch->ndev); if (!ret) channels[ch->channels_index] = ch; return ret; } void UnregisterChannel(channel_t *ch) { unsigned i = ch->channels_index; if (i >= B_MAX_DEV || channels[i] != ch) { printk("UnregisterChannel(%p): channel(%s) not associated with device.\n", ch, ch->device_name); return; } b_Down(ch); b_Close(ch); 
/* (continued) UnregisterChannel tail, the DeviceClass stubs, the
 * chanStates name table, and proc_channel_getinfo.  The proc handler's
 * channel loop below is also missing its bounds ("for (i=0; ilink;"). */
//unregister_netdev(&ch->ndev); channels[i] = NULL; if (ch->link) drop_call(ch->link); MOD_DEC_USE_COUNT; pr_debug("UnregisterChannel: freed.\n"); } unsigned int RegisterDeviceClass(char *name) { pr_debug("RegisterDeviceClass(%s)\n", name); return 0; } void UnregisterDeviceClass(unsigned int class) { pr_debug("UnregisterDeviceClass(%u)\n", class); } static char *chanStates[] = { "idle", "dialing", "ringing", "connecting", "connected", "disconnecting", "disconnected", "stalled", "unavailable" }; #if LINUX_VERSION_CODE < 0x2031B static int proc_channel_getinfo(char *buf, char **start, off_t offset, int len, int unused) #else static int proc_channel_getinfo(char *buf, char **start, off_t offset, int len) #endif { off_t pos = 0, begin = 0; int l, i; unsigned long flags; l = sprintf(buf, "%-3s %-10s %-10s %-16s %-7s %-5s %-4s %-7s %-10s %-10s %-10s %-10s\n", "dev", "name", "class", "state", "dev", "flags", "use", "busy", "rxpkt", "rx_b", "txpkt", "tx_b" ); local_irq_save(flags); local_irq_disable(); for (i=0; ilink; l += sprintf(buf+l, "%-3d %-10s %-10s %-16s %-7s %5x %4d %7lx %10lu %10lu %10lu %10lu\n", i, ch->device_name, ch->dev_class, ch->state > CS_UNAVAIL ? "unknown" : chanStates[ch->state], call && call->bundle ? call->bundle->name : "none", call ? call->lflags : 0, call ? call->use_count : 0, (long)test_busy(ch), /* must be long due to x86-64 compiler bug. 
 */ (unsigned long)ch->stats.rx_packets, (unsigned long)ch->CH_rx_bytes, (unsigned long)ch->stats.tx_packets, (unsigned long)ch->CH_tx_bytes ); pos = begin + l; if (pos < offset) { l = 0; begin = pos; } if (pos > offset + len) break; } local_irq_restore(flags); *start = buf + (offset - begin); l -= (offset - begin); if(l > len) l = len; return l; } #if LINUX_VERSION_CODE < 0x2031B static int proc_interface_getinfo(char *buf, char **start, off_t offset, int len, int unused) #else static int proc_interface_getinfo(char *buf, char **start, off_t offset, int len) #endif { off_t pos = 0, begin = 0; int l = 0, i; l = sprintf(buf, "%-7s %-4s %-7s %-5s %-5s %s\n", "dev", "up", "mpFLen", "flags", "pause", "channels" ); for (i=0; iname, b->dev.flags & IFF_UP ? "yes" : "no", b->num_frags, b->lflags, #if LINUX_VERSION_CODE < 0x02032B b->dev.tbusy ? "yes" : "no"); #else netif_queue_stopped(&b->dev) ? "yes" : "no"); #endif chan = b->chan; do { if (!chan) break; l += sprintf(buf+l, " %s", chan->ch->device_name); chan = chan->next; } while (chan != b->chan) ; local_irq_enable(); buf[l++] = '\n'; pos = begin + l; if (pos < offset) { l = 0; begin = pos; } if (pos > offset + len) break; } *start = buf + (offset - begin); l -= (offset - begin); if (l > len) l = len; return l; } #if LINUX_VERSION_CODE < 0x2031B static int proc_call_getinfo(char *buf, char **start, off_t offset, int len, int unused) #else static int proc_call_getinfo(char *buf, char **start, off_t offset, int len) #endif { off_t pos = 0, begin = 0; int l = 0, i; l = sprintf(buf, "i use idx ch\n"); for (i=0; iuse_count, call->idx, call->ch ? 
/* Continuation of proc_call_getinfo, then proc_mpq_getinfo (multilink
 * fragment queue dump), the 2.0-era static proc_dir_entry tables,
 * module init/cleanup and the exported symbols.
 *
 * NOTE(review): corrupted like the region above — loop bounds were eaten
 * ("for (i=0; iname)" in proc_mpq_getinfo; cleanup_module's entire
 * unregister loop collapsed into "for (i=0; i= KERNEL_VERSION(2,5,0))").
 * Recover from CVS rev 1.13 before rewriting.
 *
 * NOTE(review): in init_module below, 'ret' from
 * register_chrdev(BAB_MAJOR, ...) is immediately overwritten by the
 * BPPP_MAJOR registration, so a failure to grab major 60 is silently
 * ignored and the first registration is never rolled back on error. */
call->ch->device_name : "none"); buf[l++] = '\n'; local_irq_enable(); pos = begin + l; if (pos < offset) { l = 0; begin = pos; } if (pos > offset + len) break; } *start = buf + (offset - begin); l -= (offset - begin); if (l > len) l = len; return l; } #if LINUX_VERSION_CODE < 0x2031B static int proc_mpq_getinfo(char *buf, char **start, off_t offset, int len, int unused) #else static int proc_mpq_getinfo(char *buf, char **start, off_t offset, int len) #endif { off_t pos = 0, begin = 0; int l = 0, i, j; struct sk_buff *skb; for (i=0; iname); buf[l++] = '\n'; for (j=0; jfrags[j]; skb; skb=skb->next) { l += sprintf(buf+l, "%08x\n", *(u32 *)skb->data); pos = begin + l; if (pos < offset) { l = 0; begin = pos; } if (pos > offset + len) break; } local_irq_enable(); } } *start = buf + (offset - begin); l -= (offset - begin); if (l > len) l = len; return l; } #if LINUX_VERSION_CODE < 0x2031B static struct proc_dir_entry proc_babylon_channels = { 0, 8, "bab_chan", S_IFREG | S_IRUGO, 1, 0, 0, 0, &proc_net_inode_operations, proc_channel_getinfo, 0, 0, 0, 0 }; static struct proc_dir_entry proc_babylon_interfaces = { 0, 7, "bab_dev", S_IFREG | S_IRUGO, 1, 0, 0, 0, &proc_net_inode_operations, proc_interface_getinfo, 0, 0, 0, 0 }; static struct proc_dir_entry proc_babylon_mpq = { 0, 7, "bab_mpq", S_IFREG | S_IRUGO, 1, 0, 0, 0, &proc_net_inode_operations, proc_mpq_getinfo, 0, 0, 0, 0 }; static struct proc_dir_entry proc_babylon_call = { 0, 8, "bab_call", S_IFREG | S_IRUGO, 1, 0, 0, 0, &proc_net_inode_operations, proc_call_getinfo, 0, 0, 0, 0 }; #endif static char devName[] = "bab"; static char bpppName[] = "bppp"; static struct packet_type ppp_pt = { .type = ETH_P_PPP, .func = b_input, }; int init_module(void) { int ret; printk(KERN_INFO "Babylon v" VER " Copyright 1999 Spellcast Telecommunications Inc.\n"); skb_queue_head_init(&rx_q); chan_cachep = kmem_cache_create("babylon chan", sizeof(struct chan), 0, 0, NULL, NULL); if (!chan_cachep) return -ENOMEM; #if LINUX_VERSION_CODE < 
0x2031B proc_register(&proc_root, &proc_babylon_channels); proc_register(&proc_root, &proc_babylon_interfaces); proc_register(&proc_root, &proc_babylon_mpq); proc_register(&proc_root, &proc_babylon_call); #else create_proc_info_entry("bab_chan", 0, &proc_root, proc_channel_getinfo); create_proc_info_entry("bab_dev", 0, &proc_root, proc_interface_getinfo); create_proc_info_entry("bab_mpq", 0, &proc_root, proc_mpq_getinfo); create_proc_info_entry("bab_call", 0, &proc_root, proc_call_getinfo); #endif ret = register_chrdev(BAB_MAJOR, devName, &b_fops); ret = register_chrdev(BPPP_MAJOR, bpppName, &bppp_fops); if (!ret) dev_add_pack(&ppp_pt); return ret; } void cleanup_module(void) { int i; for (i=0; i= KERNEL_VERSION(2,5,0)) EXPORT_SYMBOL(RegisterChannel); EXPORT_SYMBOL(UnregisterChannel); EXPORT_SYMBOL(ch_ioctl); EXPORT_SYMBOL(ch_Input); #endif MODULE_LICENSE("GPL")