
Search results for refs:skb (results 1 – 25 of 834), sorted by relevance


/linux-2.4.37.9/drivers/isdn/pcbit/
capi.c
56 int capi_conn_req(const char * calledPN, struct sk_buff **skb, int proto) in capi_conn_req() argument
81 if ((*skb = dev_alloc_skb(len)) == NULL) { in capi_conn_req()
88 *((ushort*) skb_put(*skb, 2)) = AppInfoMask; in capi_conn_req()
93 *(skb_put(*skb, 1)) = 3; /* BC0.Length */ in capi_conn_req()
94 *(skb_put(*skb, 1)) = 0x80; /* Speech */ in capi_conn_req()
95 *(skb_put(*skb, 1)) = 0x10; /* Circuit Mode */ in capi_conn_req()
96 *(skb_put(*skb, 1)) = 0x23; /* A-law */ in capi_conn_req()
101 *(skb_put(*skb, 1)) = 2; /* BC0.Length */ in capi_conn_req()
102 *(skb_put(*skb, 1)) = 0x88; /* Digital Information */ in capi_conn_req()
103 *(skb_put(*skb, 1)) = 0x90; /* BC0.Octect4 */ in capi_conn_req()
[all …]
/linux-2.4.37.9/include/linux/
skbuff.h
232 extern void __kfree_skb(struct sk_buff *skb);
234 extern void kfree_skbmem(struct sk_buff *skb);
235 extern struct sk_buff * skb_clone(struct sk_buff *skb, int priority);
236 extern struct sk_buff * skb_copy(const struct sk_buff *skb, int priority);
237 extern struct sk_buff * pskb_copy(struct sk_buff *skb, int gfp_mask);
238 extern int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask);
239 extern struct sk_buff * skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom);
240 extern struct sk_buff * skb_copy_expand(const struct sk_buff *skb,
244 extern struct sk_buff * skb_pad(struct sk_buff *skb, int pad);
246 extern void skb_over_panic(struct sk_buff *skb, int len, void *here);
[all …]
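The skbuff.h declarations above are the core 2.4 sk_buff API. As a quick, hedged illustration of how they fit together, here is a minimal sketch (hypothetical helper name, invented for illustration; 2.4-era signatures assumed, error handling abbreviated):

#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical example: build a buffer with headroom, append a payload,
 * and take a clone (shared data, separate struct sk_buff). */
static struct sk_buff *example_build(const unsigned char *payload, int len)
{
	struct sk_buff *skb, *clone;

	skb = alloc_skb(len + 16, GFP_ATOMIC);   /* 16 bytes of slack for headers */
	if (skb == NULL)
		return NULL;

	skb_reserve(skb, 16);                    /* create headroom */
	memcpy(skb_put(skb, len), payload, len); /* skb_put() extends the data area */

	clone = skb_clone(skb, GFP_ATOMIC);      /* cheap copy, data is shared */
	if (clone != NULL)
		kfree_skb(clone);                /* each reference is freed on its own */

	return skb;
}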
if_vlan.h
145 static inline int __vlan_hwaccel_rx(struct sk_buff *skb, in __vlan_hwaccel_rx() argument
151 skb->real_dev = skb->dev; in __vlan_hwaccel_rx()
152 skb->dev = grp->vlan_devices[vlan_tag & VLAN_VID_MASK]; in __vlan_hwaccel_rx()
153 if (skb->dev == NULL) { in __vlan_hwaccel_rx()
154 kfree_skb(skb); in __vlan_hwaccel_rx()
162 skb->dev->last_rx = jiffies; in __vlan_hwaccel_rx()
164 stats = vlan_dev_get_stats(skb->dev); in __vlan_hwaccel_rx()
166 stats->rx_bytes += skb->len; in __vlan_hwaccel_rx()
168 skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tag); in __vlan_hwaccel_rx()
169 switch (skb->pkt_type) { in __vlan_hwaccel_rx()
[all …]
/linux-2.4.37.9/net/core/
skbuff.c
88 void skb_over_panic(struct sk_buff *skb, int sz, void *here) in skb_over_panic() argument
91 here, skb->len, sz, skb->dev ? skb->dev->name : "<NULL>"); in skb_over_panic()
105 void skb_under_panic(struct sk_buff *skb, int sz, void *here) in skb_under_panic() argument
108 here, skb->len, sz, skb->dev ? skb->dev->name : "<NULL>"); in skb_under_panic()
117 struct sk_buff *skb; in skb_head_from_pool() local
121 skb = __skb_dequeue(list); in skb_head_from_pool()
123 return skb; in skb_head_from_pool()
128 static __inline__ void skb_head_to_pool(struct sk_buff *skb) in skb_head_to_pool() argument
136 __skb_queue_head(list, skb); in skb_head_to_pool()
141 kmem_cache_free(skbuff_head_cache, skb); in skb_head_to_pool()
[all …]
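skb_over_panic()/skb_under_panic() above fire when skb_put() or skb_push() run past the allocated buffer. A hedged sketch of the usual defensive pattern (hypothetical helper, invented for illustration; pskb_expand_head() as declared in skbuff.h above):

#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/errno.h>

/* Hypothetical example: append bytes without ever hitting skb_over_panic(),
 * growing the data area first if the tailroom is too small. */
static int example_append(struct sk_buff *skb, const void *data, int len)
{
	if (skb_tailroom(skb) < len &&
	    pskb_expand_head(skb, 0, len - skb_tailroom(skb), GFP_ATOMIC))
		return -ENOMEM;

	memcpy(skb_put(skb, len), data, len);
	return 0;
}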
/linux-2.4.37.9/drivers/net/wireless/prism54/
islpci_eth.c
42 struct sk_buff *skb; in islpci_eth_cleanup_transmit() local
57 skb = priv->data_low_tx[index]; in islpci_eth_cleanup_transmit()
62 skb, skb->data, skb->len, skb->truesize); in islpci_eth_cleanup_transmit()
67 skb->len, PCI_DMA_TODEVICE); in islpci_eth_cleanup_transmit()
68 dev_kfree_skb_irq(skb); in islpci_eth_cleanup_transmit()
69 skb = NULL; in islpci_eth_cleanup_transmit()
77 islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev) in islpci_eth_transmit() argument
102 frame_size = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len; in islpci_eth_transmit()
124 if (likely(((long) skb->data & 0x03) | init_wds)) { in islpci_eth_transmit()
126 offset = (4 - (long) skb->data) & 0x03; in islpci_eth_transmit()
[all …]
/linux-2.4.37.9/net/ipv6/
ip6_input.c
45 static inline int ip6_rcv_finish( struct sk_buff *skb) in ip6_rcv_finish() argument
47 if (skb->dst == NULL) in ip6_rcv_finish()
48 ip6_route_input(skb); in ip6_rcv_finish()
50 return skb->dst->input(skb); in ip6_rcv_finish()
53 int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt) in ipv6_rcv() argument
58 if (skb->pkt_type == PACKET_OTHERHOST) in ipv6_rcv()
63 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) in ipv6_rcv()
69 ((struct inet6_skb_parm *)skb->cb)->iif = dev->ifindex; in ipv6_rcv()
71 if (skb->len < sizeof(struct ipv6hdr)) in ipv6_rcv()
74 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) in ipv6_rcv()
[all …]
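ipv6_rcv() above follows the standard 2.4 receive-handler shape: drop foreign frames, unshare the buffer, and pskb_may_pull() before reading the header. A protocol-agnostic, hedged sketch of that shape (hypothetical protocol and header, names invented for illustration):

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>

struct myproto_hdr {                 /* invented header, 4 bytes */
	unsigned char  version;
	unsigned char  flags;
	unsigned short payload_len;
};

static int myproto_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt)
{
	struct myproto_hdr *hdr;

	if (skb->pkt_type == PACKET_OTHERHOST)
		goto drop;

	/* may hand back a private copy if the skb was shared */
	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
		return 0;

	/* make sure the header lies in the linear data area */
	if (skb->len < sizeof(struct myproto_hdr) ||
	    !pskb_may_pull(skb, sizeof(struct myproto_hdr)))
		goto drop;

	hdr = (struct myproto_hdr *)skb->data;
	/* ... dispatch on hdr->version here ... */

drop:
	kfree_skb(skb);
	return 0;
}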
exthdrs.c
80 int ip6_tlvopt_unknown(struct sk_buff *skb, int optoff) in ip6_tlvopt_unknown() argument
82 switch ((skb->nh.raw[optoff] & 0xC0) >> 6) { in ip6_tlvopt_unknown()
93 if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr)) in ip6_tlvopt_unknown()
96 icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff); in ip6_tlvopt_unknown()
100 kfree_skb(skb); in ip6_tlvopt_unknown()
106 static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff *skb) in ip6_parse_tlv() argument
109 int off = skb->h.raw - skb->nh.raw; in ip6_parse_tlv()
110 int len = ((skb->h.raw[1]+1)<<3); in ip6_parse_tlv()
112 if ((skb->h.raw + len) - skb->data > skb_headlen(skb)) in ip6_parse_tlv()
119 int optlen = skb->nh.raw[off+1]+2; in ip6_parse_tlv()
[all …]
ip6_output.c
53 static __inline__ void ipv6_select_ident(struct sk_buff *skb, struct frag_hdr *fhdr) in ipv6_select_ident() argument
65 static inline int ip6_output_finish(struct sk_buff *skb) in ip6_output_finish() argument
68 struct dst_entry *dst = skb->dst; in ip6_output_finish()
76 memcpy(skb->data - hh_alen, hh->hh_data, hh_alen); in ip6_output_finish()
78 skb_push(skb, hh->hh_len); in ip6_output_finish()
79 return hh->hh_output(skb); in ip6_output_finish()
81 return dst->neighbour->output(skb); in ip6_output_finish()
83 kfree_skb(skb); in ip6_output_finish()
102 int ip6_output(struct sk_buff *skb) in ip6_output() argument
104 struct dst_entry *dst = skb->dst; in ip6_output()
[all …]
/linux-2.4.37.9/net/lapb/
lapb_subr.c
52 struct sk_buff *skb; in lapb_frames_acked() local
62 skb = skb_dequeue(&lapb->ack_queue); in lapb_frames_acked()
63 kfree_skb(skb); in lapb_frames_acked()
71 struct sk_buff *skb, *skb_prev = NULL; in lapb_requeue_frames() local
78 while ((skb = skb_dequeue(&lapb->ack_queue)) != NULL) { in lapb_requeue_frames()
80 skb_queue_head(&lapb->write_queue, skb); in lapb_requeue_frames()
82 skb_append(skb_prev, skb); in lapb_requeue_frames()
83 skb_prev = skb; in lapb_requeue_frames()
112 void lapb_decode(lapb_cb *lapb, struct sk_buff *skb, struct lapb_frame *frame) in lapb_decode() argument
117 …pb: (%p) S%d RX %02X %02X %02X\n", lapb->token, lapb->state, skb->data[0], skb->data[1], skb->data… in lapb_decode()
[all …]
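lapb_frames_acked() and lapb_requeue_frames() above are built on the sk_buff queue primitives. A minimal hedged sketch of that queue API (hypothetical filter function, invented for illustration):

#include <linux/skbuff.h>

/* Hypothetical example: drop every queued frame whose first byte equals 'tag',
 * preserving the order of everything else. */
static void example_purge(struct sk_buff_head *queue, unsigned char tag)
{
	struct sk_buff_head keep;
	struct sk_buff *skb;

	skb_queue_head_init(&keep);

	while ((skb = skb_dequeue(queue)) != NULL) {
		if (skb->len > 0 && skb->data[0] == tag)
			kfree_skb(skb);              /* drop */
		else
			skb_queue_tail(&keep, skb);  /* keep, in order */
	}
	while ((skb = skb_dequeue(&keep)) != NULL)
		skb_queue_tail(queue, skb);
}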
/linux-2.4.37.9/net/ax25/
ax25_in.c
71 static int ax25_rx_fragment(ax25_cb *ax25, struct sk_buff *skb) in ax25_rx_fragment() argument
76 if (!(*skb->data & AX25_SEG_FIRST)) { in ax25_rx_fragment()
77 if ((ax25->fragno - 1) == (*skb->data & AX25_SEG_REM)) { in ax25_rx_fragment()
79 ax25->fragno = *skb->data & AX25_SEG_REM; in ax25_rx_fragment()
80 skb_pull(skb, 1); /* skip fragno */ in ax25_rx_fragment()
81 ax25->fraglen += skb->len; in ax25_rx_fragment()
82 skb_queue_tail(&ax25->frag_queue, skb); in ax25_rx_fragment()
117 if (*skb->data & AX25_SEG_FIRST) { in ax25_rx_fragment()
119 ax25->fragno = *skb->data & AX25_SEG_REM; in ax25_rx_fragment()
120 skb_pull(skb, 1); /* skip fragno */ in ax25_rx_fragment()
[all …]
ax25_out.c
60 ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, ax25_address *src, ax25_address *dest, ax… in ax25_send_frame() argument
80 ax25_output(ax25, paclen, skb); in ax25_send_frame()
125 ax25_output(ax25, paclen, skb); in ax25_send_frame()
136 void ax25_output(ax25_cb *ax25, int paclen, struct sk_buff *skb) in ax25_output() argument
143 if ((skb->len - 1) > paclen) { in ax25_output()
144 if (*skb->data == AX25_P_TEXT) { in ax25_output()
145 skb_pull(skb, 1); /* skip PID */ in ax25_output()
152 fragno = skb->len / paclen; in ax25_output()
153 if (skb->len % paclen == 0) fragno--; in ax25_output()
155 frontlen = skb_headroom(skb); /* Address space + CTRL */ in ax25_output()
[all …]
/linux-2.4.37.9/net/irda/
irlap_frame.c
54 struct sk_buff *skb) in irlap_insert_info() argument
56 struct irda_skb_cb *cb = (struct irda_skb_cb *) skb->cb; in irlap_insert_info()
89 void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb) in irlap_queue_xmit() argument
92 skb->dev = self->netdev; in irlap_queue_xmit()
93 skb->h.raw = skb->nh.raw = skb->mac.raw = skb->data; in irlap_queue_xmit()
94 skb->protocol = htons(ETH_P_IRDA); in irlap_queue_xmit()
95 skb->priority = TC_PRIO_BESTEFFORT; in irlap_queue_xmit()
97 irlap_insert_info(self, skb); in irlap_queue_xmit()
99 dev_queue_xmit(skb); in irlap_queue_xmit()
109 struct sk_buff *skb; in irlap_send_snrm_frame() local
[all …]
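irlap_queue_xmit() above shows the generic 2.4 transmit hand-off: point skb->dev at the interface, set the header pointers, protocol and priority, then dev_queue_xmit(). A hedged sketch of the same hand-off (hypothetical function, invented for illustration; ETH_P_802_2 chosen only as a placeholder protocol):

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/pkt_sched.h>
#include <asm/byteorder.h>

/* Hypothetical example: queue an already-built frame on a device. */
static void example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	skb->dev = dev;
	/* 2.4 keeps mac/network/transport header pointers in unions */
	skb->mac.raw = skb->nh.raw = skb->h.raw = skb->data;
	skb->protocol = htons(ETH_P_802_2);
	skb->priority = TC_PRIO_BESTEFFORT;

	dev_queue_xmit(skb);   /* consumes the skb, even on error */
}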
iriap_event.c
32 struct sk_buff *skb);
34 struct sk_buff *skb);
36 struct sk_buff *skb);
39 struct sk_buff *skb);
41 struct sk_buff *skb);
43 struct sk_buff *skb);
45 struct sk_buff *skb);
47 struct sk_buff *skb);
49 struct sk_buff *skb);
52 struct sk_buff *skb);
[all …]
/linux-2.4.37.9/net/ipv4/
ip_input.c
156 int ip_call_ra_chain(struct sk_buff *skb) in ip_call_ra_chain() argument
159 u8 protocol = skb->nh.iph->protocol; in ip_call_ra_chain()
171 || (sk->bound_dev_if == skb->dev->ifindex))) { in ip_call_ra_chain()
172 if (skb->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) { in ip_call_ra_chain()
173 skb = ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN); in ip_call_ra_chain()
174 if (skb == NULL) { in ip_call_ra_chain()
180 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); in ip_call_ra_chain()
189 raw_rcv(last, skb); in ip_call_ra_chain()
198 static int ip_run_ipprot(struct sk_buff *skb, struct iphdr *iph, in ip_run_ipprot() argument
205 struct sk_buff *skb2 = skb; in ip_run_ipprot()
[all …]
tcp_output.c
48 void update_send_head(struct sock *sk, struct tcp_opt *tp, struct sk_buff *skb) in update_send_head() argument
50 tp->send_head = skb->next; in update_send_head()
53 tp->snd_nxt = TCP_SKB_CB(skb)->end_seq; in update_send_head()
121 static __inline__ void tcp_event_data_sent(struct tcp_opt *tp, struct sk_buff *skb) in tcp_event_data_sent() argument
196 int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb) in tcp_transmit_skb() argument
198 if(skb != NULL) { in tcp_transmit_skb()
200 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); in tcp_transmit_skb()
246 th = (struct tcphdr *) skb_push(skb, tcp_header_size); in tcp_transmit_skb()
247 skb->h.th = th; in tcp_transmit_skb()
248 skb_set_owner_w(skb, sk); in tcp_transmit_skb()
[all …]
ip_forward.c
44 static inline int ip_forward_finish(struct sk_buff *skb) in ip_forward_finish() argument
46 struct ip_options * opt = &(IPCB(skb)->opt); in ip_forward_finish()
52 struct rtable *rt = (struct rtable*)skb->dst; in ip_forward_finish()
58 write_lock_irq(&skb->dev->fastpath_lock); in ip_forward_finish()
59 old_dst = skb->dev->fastpath[h]; in ip_forward_finish()
60 skb->dev->fastpath[h] = dst_clone(&rt->u.dst); in ip_forward_finish()
61 write_unlock_irq(&skb->dev->fastpath_lock); in ip_forward_finish()
66 return (ip_send(skb)); in ip_forward_finish()
69 ip_forward_options(skb); in ip_forward_finish()
70 return (ip_send(skb)); in ip_forward_finish()
[all …]
ip_output.c
114 output_maybe_reroute(struct sk_buff *skb) in output_maybe_reroute() argument
116 return skb->dst->output(skb); in output_maybe_reroute()
122 int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk, in ip_build_and_send_pkt() argument
125 struct rtable *rt = (struct rtable *)skb->dst; in ip_build_and_send_pkt()
130 iph=(struct iphdr *)skb_push(skb,sizeof(struct iphdr) + opt->optlen); in ip_build_and_send_pkt()
132 iph=(struct iphdr *)skb_push(skb,sizeof(struct iphdr)); in ip_build_and_send_pkt()
145 iph->tot_len = htons(skb->len); in ip_build_and_send_pkt()
147 skb->nh.iph = iph; in ip_build_and_send_pkt()
151 ip_options_build(skb, opt, daddr, rt, 0); in ip_build_and_send_pkt()
156 return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev, in ip_build_and_send_pkt()
[all …]
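ip_build_and_send_pkt() above prepends the IP header with skb_push() into headroom reserved when the buffer was allocated, then enters netfilter through NF_HOOK(). A hedged sketch of the prepend step alone (hypothetical fixed 4-byte header, invented for illustration):

#include <linux/skbuff.h>
#include <linux/string.h>

/* Hypothetical example: prepend a fixed 4-byte header; the caller must have
 * reserved enough headroom (e.g. with skb_reserve()) at allocation time. */
static int example_prepend(struct sk_buff *skb, const unsigned char hdr[4])
{
	if (skb_headroom(skb) < 4)
		return -1;

	/* skb_push() moves skb->data back and grows skb->len */
	memcpy(skb_push(skb, 4), hdr, 4);
	return 0;
}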
ipmr.c
107 static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local);
109 static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm);
169 static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) in reg_vif_xmit() argument
172 ((struct net_device_stats*)dev->priv)->tx_bytes += skb->len; in reg_vif_xmit()
174 ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT); in reg_vif_xmit()
176 kfree_skb(skb); in reg_vif_xmit()
292 struct sk_buff *skb; in ipmr_destroy_unres() local
297 while((skb=skb_dequeue(&c->mfc_un.unres.unresolved))) { in ipmr_destroy_unres()
298 if (skb->nh.iph->version == 0) { in ipmr_destroy_unres()
299 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); in ipmr_destroy_unres()
[all …]
/linux-2.4.37.9/net/x25/
x25_dev.c
48 static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *neigh) in x25_receive_data() argument
54 frametype = skb->data[2]; in x25_receive_data()
55 lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF); in x25_receive_data()
62 x25_link_control(skb, neigh, frametype); in x25_receive_data()
72 skb->h.raw = skb->data; in x25_receive_data()
75 queued = x25_process_rx_frame(sk, skb); in x25_receive_data()
77 sk_add_backlog(sk, skb); in x25_receive_data()
87 return x25_rx_call_request(skb, neigh, lci); in x25_receive_data()
101 int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype) in x25_lapb_receive_frame() argument
106 skb->sk = NULL; in x25_lapb_receive_frame()
[all …]
/linux-2.4.37.9/net/bridge/
br_forward.c
24 static inline int should_deliver(struct net_bridge_port *p, struct sk_buff *skb) in should_deliver() argument
26 if (skb->dev == p->dev || in should_deliver()
33 static int __dev_queue_push_xmit(struct sk_buff *skb) in __dev_queue_push_xmit() argument
35 skb_push(skb, ETH_HLEN); in __dev_queue_push_xmit()
36 dev_queue_xmit(skb); in __dev_queue_push_xmit()
41 static int __br_forward_finish(struct sk_buff *skb) in __br_forward_finish() argument
43 NF_HOOK(PF_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev, in __br_forward_finish()
49 static void __br_deliver(struct net_bridge_port *to, struct sk_buff *skb) in __br_deliver() argument
51 skb->dev = to->dev; in __br_deliver()
52 NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, in __br_deliver()
[all …]
br_input.c
25 static int br_pass_frame_up_finish(struct sk_buff *skb) in br_pass_frame_up_finish() argument
27 netif_rx(skb); in br_pass_frame_up_finish()
32 static void br_pass_frame_up(struct net_bridge *br, struct sk_buff *skb) in br_pass_frame_up() argument
37 br->statistics.rx_bytes += skb->len; in br_pass_frame_up()
39 indev = skb->dev; in br_pass_frame_up()
40 skb->dev = &br->dev; in br_pass_frame_up()
41 skb->pkt_type = PACKET_HOST; in br_pass_frame_up()
42 skb_push(skb, ETH_HLEN); in br_pass_frame_up()
43 skb->protocol = eth_type_trans(skb, &br->dev); in br_pass_frame_up()
45 NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL, in br_pass_frame_up()
[all …]
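br_pass_frame_up() above uses the 2.4 NF_HOOK() convention: the frame traverses the netfilter chain for the given hook and, if accepted, continues in the okfn callback. A hedged sketch of that convention (hypothetical functions, invented for illustration; NF_BR_LOCAL_IN used as in the snippet above):

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/netfilter.h>
#include <linux/netfilter_bridge.h>

/* okfn: runs only if the netfilter verdict is NF_ACCEPT */
static int example_finish(struct sk_buff *skb)
{
	netif_rx(skb);            /* deliver to the protocol layers */
	return 0;
}

/* Hypothetical example: push a received frame through NF_BR_LOCAL_IN */
static void example_local_in(struct sk_buff *skb, struct net_device *indev)
{
	NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL, example_finish);
}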
/linux-2.4.37.9/net/decnet/
dn_nsp_in.c
81 static void dn_log_martian(struct sk_buff *skb, const char *msg) in dn_log_martian() argument
84 char *devname = skb->dev ? skb->dev->name : "???"; in dn_log_martian()
85 struct dn_skb_cb *cb = DN_SKB_CB(skb); in dn_log_martian()
95 static void dn_ack(struct sock *sk, struct sk_buff *skb, unsigned short ack) in dn_ack() argument
105 wakeup |= dn_nsp_check_xmit_queue(sk, skb, &scp->data_xmit_queue, ack); in dn_ack()
113 wakeup |= dn_nsp_check_xmit_queue(sk, skb, &scp->other_xmit_queue, ack); in dn_ack()
127 static int dn_process_ack(struct sock *sk, struct sk_buff *skb, int oth) in dn_process_ack() argument
129 unsigned short *ptr = (unsigned short *)skb->data; in dn_process_ack()
133 if (skb->len < 2) in dn_process_ack()
137 skb_pull(skb, 2); in dn_process_ack()
[all …]
dn_route.c
103 static struct dst_entry *dn_dst_reroute(struct dst_entry *, struct sk_buff *skb);
206 struct sk_buff *skb) in dn_dst_reroute() argument
221 static void dn_dst_link_failure(struct sk_buff *skb) in dn_dst_link_failure() argument
306 static int dn_return_short(struct sk_buff *skb) in dn_return_short() argument
315 skb_push(skb, skb->data - skb->nh.raw); in dn_return_short()
317 if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL) in dn_return_short()
320 cb = DN_SKB_CB(skb); in dn_return_short()
322 ptr = skb->data + 2; in dn_return_short()
336 skb->pkt_type = PACKET_OUTGOING; in dn_return_short()
337 dn_rt_finish_output(skb, NULL); in dn_return_short()
[all …]
dn_nsp_out.c
84 struct sk_buff *skb; in dn_alloc_skb() local
87 if ((skb = alloc_skb(size + hdr, pri)) == NULL) in dn_alloc_skb()
90 skb->protocol = __constant_htons(ETH_P_DNA_RT); in dn_alloc_skb()
91 skb->pkt_type = PACKET_OUTGOING; in dn_alloc_skb()
94 skb_set_owner_w(skb, sk); in dn_alloc_skb()
96 skb_reserve(skb, hdr); in dn_alloc_skb()
98 return skb; in dn_alloc_skb()
110 struct sk_buff *skb = NULL; in dn_alloc_send_skb() local
114 while(skb == NULL) { in dn_alloc_send_skb()
153 if ((skb = dn_alloc_skb(sk, len, sk->allocation)) == NULL) in dn_alloc_send_skb()
[all …]
/linux-2.4.37.9/drivers/isdn/tpam/
tpam_nco.c
41 struct sk_buff *skb; in build_NCOpacket() local
52 if (!(skb = alloc_skb(finalsize, GFP_ATOMIC))) { in build_NCOpacket()
58 h = (skb_header *)skb_put(skb, sizeof(skb_header)); in build_NCOpacket()
65 p = (pci_mpb *)skb_put(skb, sizeof(pci_mpb)); in build_NCOpacket()
74 return skb; in build_NCOpacket()
85 struct sk_buff *skb; in build_ACreateNCOReq() local
91 if (!(skb = build_NCOpacket(ID_ACreateNCOReq, 23 + strlen(phone), 0, 0, 0))) in build_ACreateNCOReq()
95 tlv = (u8 *)skb_put(skb, 3); in build_ACreateNCOReq()
100 tlv = (u8 *)skb_put(skb, 4); in build_ACreateNCOReq()
106 tlv = (u8 *)skb_put(skb, 3); in build_ACreateNCOReq()
[all …]
