/linux-2.4.37.9/net/ipv4/ipvs/

D | ip_vs_est.c |
     70  struct ip_vs_estimator *e;  in estimation_timer() local
     78  for (e = est_list; e; e = e->next) {  in estimation_timer()
     79  s = e->stats;  in estimation_timer()
     89  rate = (n_conns - e->last_conns)<<9;  in estimation_timer()
     90  e->last_conns = n_conns;  in estimation_timer()
     91  e->cps += ((long)rate - (long)e->cps)>>2;  in estimation_timer()
     92  s->cps = (e->cps+0x1FF)>>10;  in estimation_timer()
     94  rate = (n_inpkts - e->last_inpkts)<<9;  in estimation_timer()
     95  e->last_inpkts = n_inpkts;  in estimation_timer()
     96  e->inpps += ((long)rate - (long)e->inpps)>>2;  in estimation_timer()
    [all …]
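
The estimation_timer() lines above are a fixed-point exponentially weighted moving average: the per-tick counter delta is scaled up by <<9, folded into the running estimate with a gain of 1/4 (the >>2), and read back out with rounding via (cps+0x1FF)>>10. A minimal user-space sketch of the same arithmetic, assuming (not shown in the listing) that the timer fires every 2 seconds, so delta<<9 equals connections/sec scaled by 2^10; the counter source here is invented:

    #include <stdio.h>

    /* Fixed-point EWMA in the style of estimation_timer() above. */
    struct est {
        unsigned long last_conns;   /* counter value at the previous tick      */
        long          cps;          /* smoothed conns/sec, 10 fractional bits  */
    };

    static unsigned int est_tick(struct est *e, unsigned long n_conns)
    {
        long rate = (long)(n_conns - e->last_conns) << 9;  /* cps << 10 per 2 s tick */
        e->last_conns = n_conns;
        e->cps += (rate - e->cps) >> 2;                    /* EWMA, weight 1/4       */
        return (unsigned int)((e->cps + 0x1FF) >> 10);     /* round to integer cps   */
    }

    int main(void)
    {
        struct est e = { 0, 0 };
        unsigned long total = 0;
        for (int i = 0; i < 10; i++) {
            total += 200;                                  /* 200 new conns per tick */
            printf("tick %d: est %u conns/sec\n", i, est_tick(&e, total));
        }
        return 0;                                          /* converges toward 100   */
    }

The net/sched qdisc estimator further down this listing (estimator.c) applies the same update with a configurable gain, >> e->ewma_log, in place of the fixed >> 2.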

D | ip_vs_lblcr.c |
    101  struct ip_vs_dest_list *e;  in ip_vs_dest_set_insert() local
    103  for (e=set->list; e!=NULL; e=e->next) {  in ip_vs_dest_set_insert()
    104  if (e->dest == dest)  in ip_vs_dest_set_insert()
    109  e = kmalloc(sizeof(struct ip_vs_dest_list), GFP_ATOMIC);  in ip_vs_dest_set_insert()
    110  if (e == NULL) {  in ip_vs_dest_set_insert()
    116  e->dest = dest;  in ip_vs_dest_set_insert()
    120  e->next = set->list;  in ip_vs_dest_set_insert()
    121  set->list = e;  in ip_vs_dest_set_insert()
    126  return e;  in ip_vs_dest_set_insert()
    132  struct ip_vs_dest_list *e, **ep;  in ip_vs_dest_set_erase() local
    [all …]
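
ip_vs_dest_set_insert() is a plain insert-if-absent on a singly linked list: scan for the destination, and if it is not already present, allocate a node and push it at the head. A user-space sketch of the same pattern (names here are illustrative, not the kernel's):

    #include <stdlib.h>

    struct node {
        void        *dest;   /* payload being tracked           */
        struct node *next;   /* singly linked list, head insert */
    };

    /* Return the existing node for 'dest', or push a new one at the head.
     * Returns NULL only if the allocation fails. */
    static struct node *set_insert(struct node **head, void *dest)
    {
        struct node *e;

        for (e = *head; e != NULL; e = e->next)
            if (e->dest == dest)
                return e;            /* already a member */

        e = malloc(sizeof(*e));      /* the kernel uses kmalloc(GFP_ATOMIC) here */
        if (e == NULL)
            return NULL;
        e->dest = dest;
        e->next = *head;             /* link in at the head */
        *head = e;
        return e;
    }

The struct ip_vs_dest_list *e, **ep pair in ip_vs_dest_set_erase() suggests the usual pointer-to-pointer unlink, which removes a node without tracking a separate "previous" pointer.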

D | ip_vs_wrr.c |
     55  register struct list_head *l, *e;  in ip_vs_wrr_gcd_weight() local
     61  for (e=l->next; e!=l; e=e->next) {  in ip_vs_wrr_gcd_weight()
     62  dest = list_entry(e, struct ip_vs_dest, n_list);  in ip_vs_wrr_gcd_weight()
     69  if (e == l)  in ip_vs_wrr_gcd_weight()
     72  for (e=e->next; e!=l; e=e->next) {  in ip_vs_wrr_gcd_weight()
     73  dest = list_entry(e, struct ip_vs_dest, n_list);  in ip_vs_wrr_gcd_weight()
     88  register struct list_head *l, *e;  in ip_vs_wrr_max_weight() local
     93  for (e=l->next; e!=l; e=e->next) {  in ip_vs_wrr_max_weight()
     94  dest = list_entry(e, struct ip_vs_dest, n_list);  in ip_vs_wrr_max_weight()
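
Weighted round-robin needs the GCD and the maximum of the server weights to size its scheduling steps, and the two loops above walk the circular destination list for exactly that. A stand-alone sketch over a plain array (the array form is illustrative; the kernel iterates a struct list_head):

    /* Euclid's algorithm. */
    static int gcd(int a, int b)
    {
        int t;
        while (b) { t = a % b; a = b; b = t; }
        return a;
    }

    /* GCD and maximum of a weight vector, skipping weight-0 servers, in the
     * spirit of ip_vs_wrr_gcd_weight()/ip_vs_wrr_max_weight(). */
    static void wrr_weights(const int *w, int n, int *out_gcd, int *out_max)
    {
        int g = 0, m = 0;
        for (int i = 0; i < n; i++) {
            if (w[i] <= 0)
                continue;                 /* weight 0: do not schedule         */
            g = g ? gcd(g, w[i]) : w[i];  /* first positive weight seeds the GCD */
            if (w[i] > m)
                m = w[i];
        }
        *out_gcd = g ? g : 1;             /* avoid a zero step if all weights are 0 */
        *out_max = m;
    }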

D | ip_vs_lblc.c |
    245  struct list_head *l,*e;  in ip_vs_lblc_get() local
    252  for (e=l->next; e!=l; e=e->next) {  in ip_vs_lblc_get()
    253  en = list_entry(e, struct ip_vs_lblc_entry, list);  in ip_vs_lblc_get()
    293  struct list_head *l, *e;  in ip_vs_lblc_full_check() local
    298  e = l = &tbl->bucket[j];  in ip_vs_lblc_full_check()
    300  while (e->next != l) {  in ip_vs_lblc_full_check()
    301  en = list_entry(e->next,  in ip_vs_lblc_full_check()
    305  e = e->next;  in ip_vs_lblc_full_check()
    334  struct list_head *l, *e;  in ip_vs_lblc_check_expire() local
    357  e = l = &tbl->bucket[j];  in ip_vs_lblc_check_expire()
    [all …]

D | ip_vs_lc.c |
     64  struct list_head *l, *e;  in ip_vs_lc_schedule() local
     80  for (e=l->next; e!=l; e=e->next) {  in ip_vs_lc_schedule()
     81  least = list_entry (e, struct ip_vs_dest, n_list);  in ip_vs_lc_schedule()
     93  for (e=e->next; e!=l; e=e->next) {  in ip_vs_lc_schedule()
     94  dest = list_entry(e, struct ip_vs_dest, n_list);  in ip_vs_lc_schedule()

D | ip_vs_sed.c |
     84  register struct list_head *l, *e;  in ip_vs_sed_schedule() local
    104  for (e=l->next; e!=l; e=e->next) {  in ip_vs_sed_schedule()
    105  least = list_entry(e, struct ip_vs_dest, n_list);  in ip_vs_sed_schedule()
    117  for (e=e->next; e!=l; e=e->next) {  in ip_vs_sed_schedule()
    118  dest = list_entry(e, struct ip_vs_dest, n_list);  in ip_vs_sed_schedule()

D | ip_vs_wlc.c |
     72  register struct list_head *l, *e;  in ip_vs_wlc_schedule() local
     92  for (e=l->next; e!=l; e=e->next) {  in ip_vs_wlc_schedule()
     93  least = list_entry(e, struct ip_vs_dest, n_list);  in ip_vs_wlc_schedule()
    105  for (e=e->next; e!=l; e=e->next) {  in ip_vs_wlc_schedule()
    106  dest = list_entry(e, struct ip_vs_dest, n_list);  in ip_vs_wlc_schedule()

D | ip_vs_nq.c |
     80  register struct list_head *l, *e;  in ip_vs_nq_schedule() local
    100  for (e=l->next; e!=l; e=e->next) {  in ip_vs_nq_schedule()
    101  least = list_entry(e, struct ip_vs_dest, n_list);  in ip_vs_nq_schedule()
    118  for (e=e->next; e!=l; e=e->next) {  in ip_vs_nq_schedule()
    119  dest = list_entry(e, struct ip_vs_dest, n_list);  in ip_vs_nq_schedule()
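
ip_vs_lc, ip_vs_sed, ip_vs_wlc and ip_vs_nq share one skeleton: a first pass over the circular destination list to find the first usable server, then a second pass from that point comparing the remaining servers against the current best. A sketch of that two-pass selection over an array, with a hypothetical load() metric standing in for each scheduler's own cost formula:

    #include <stddef.h>

    struct dest {
        int weight;        /* 0 disables the server                */
        int active_conns;  /* stands in for the real load counters */
    };

    /* Hypothetical cost; the real schedulers differ mainly in this formula
     * (least-connection, shortest expected delay, weighted variants, ...). */
    static long load(const struct dest *d)
    {
        return d->active_conns;
    }

    /* Two-pass scan: find the first eligible server, then see whether any
     * later one beats it. Returns NULL if no server is usable. */
    static struct dest *schedule(struct dest *tab, size_t n)
    {
        struct dest *least = NULL;
        size_t i;

        for (i = 0; i < n; i++)
            if (tab[i].weight > 0) {       /* first usable destination */
                least = &tab[i];
                break;
            }
        if (!least)
            return NULL;

        for (i++; i < n; i++)
            if (tab[i].weight > 0 && load(&tab[i]) < load(least))
                least = &tab[i];
        return least;
    }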

/linux-2.4.37.9/crypto/

D | sha1.c |
     61  u32 a, b, c, d, e;  in sha1_transform() local
     73  e = state[4];  in sha1_transform()
     76  R0(a,b,c,d,e, 0); R0(e,a,b,c,d, 1); R0(d,e,a,b,c, 2); R0(c,d,e,a,b, 3);  in sha1_transform()
     77  R0(b,c,d,e,a, 4); R0(a,b,c,d,e, 5); R0(e,a,b,c,d, 6); R0(d,e,a,b,c, 7);  in sha1_transform()
     78  R0(c,d,e,a,b, 8); R0(b,c,d,e,a, 9); R0(a,b,c,d,e,10); R0(e,a,b,c,d,11);  in sha1_transform()
     79  R0(d,e,a,b,c,12); R0(c,d,e,a,b,13); R0(b,c,d,e,a,14); R0(a,b,c,d,e,15);  in sha1_transform()
     80  R1(e,a,b,c,d,16); R1(d,e,a,b,c,17); R1(c,d,e,a,b,18); R1(b,c,d,e,a,19);  in sha1_transform()
     81  R2(a,b,c,d,e,20); R2(e,a,b,c,d,21); R2(d,e,a,b,c,22); R2(c,d,e,a,b,23);  in sha1_transform()
     82  R2(b,c,d,e,a,24); R2(a,b,c,d,e,25); R2(e,a,b,c,d,26); R2(d,e,a,b,c,27);  in sha1_transform()
     83  R2(c,d,e,a,b,28); R2(b,c,d,e,a,29); R2(a,b,c,d,e,30); R2(e,a,b,c,d,31);  in sha1_transform()
    [all …]
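
The R0/R1/R2 calls avoid shuffling the five working variables between rounds: instead of copying values, each call permutes which variable plays which role, so after five rounds the assignment is back where it started. A minimal demonstration of that argument-rotation trick with a made-up round function (this shows the call pattern only, not the actual SHA-1 round):

    #include <stdint.h>
    #include <stdio.h>

    #define ROTL32(x, n) (((x) << (n)) | ((x) >> (32 - (n))))

    /* Stand-in round: the new value lands in z, and w is rotated in place.
     * Only the call sites below matter, not this arithmetic. */
    #define ROUND(v, w, x, y, z, i) do {                                 \
            (z) += ((w) ^ (x) ^ (y)) + (uint32_t)(i) + ROTL32((v), 5);   \
            (w) = ROTL32((w), 30);                                       \
        } while (0)

    int main(void)
    {
        uint32_t a = 1, b = 2, c = 3, d = 4, e = 5;

        /* Five calls, shifting every role by one position per call; after
         * five calls each variable has played "z" exactly once. */
        ROUND(a, b, c, d, e, 0);
        ROUND(e, a, b, c, d, 1);
        ROUND(d, e, a, b, c, 2);
        ROUND(c, d, e, a, b, 3);
        ROUND(b, c, d, e, a, 4);

        printf("%08x %08x %08x %08x %08x\n",
               (unsigned)a, (unsigned)b, (unsigned)c, (unsigned)d, (unsigned)e);
        return 0;
    }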

D | sha256.c |
     84  u32 a, b, c, d, e, f, g, h, t1, t2;  in sha256_transform() local
     98  e=state[4]; f=state[5]; g=state[6]; h=state[7];  in sha256_transform()
    101  t1 = h + e1(e) + Ch(e,f,g) + 0x428a2f98 + W[ 0];  in sha256_transform()
    103  t1 = g + e1(d) + Ch(d,e,f) + 0x71374491 + W[ 1];  in sha256_transform()
    105  t1 = f + e1(c) + Ch(c,d,e) + 0xb5c0fbcf + W[ 2];  in sha256_transform()
    107  t1 = e + e1(b) + Ch(b,c,d) + 0xe9b5dba5 + W[ 3];  in sha256_transform()
    108  t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;  in sha256_transform()
    110  t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;  in sha256_transform()
    112  t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;  in sha256_transform()
    114  t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;  in sha256_transform()
    [all …]
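
Each unrolled step above is the standard SHA-256 round written with the registers renamed rather than shifted: t1 folds in the round constant and message word, t2 combines the majority and big-sigma terms, and only two variables actually change. The textbook form of one round, for comparison (Ch, Maj and the rotation amounts are quoted from memory of FIPS 180-2, so verify against the specification):

    #include <stdint.h>

    #define ROTR32(x, n) (((x) >> (n)) | ((x) << (32 - (n))))

    /* SHA-256 round building blocks, as commonly defined. */
    #define Ch(x, y, z)   (((x) & (y)) ^ (~(x) & (z)))
    #define Maj(x, y, z)  (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
    #define Sig0(x)       (ROTR32(x, 2) ^ ROTR32(x, 13) ^ ROTR32(x, 22))  /* e0() above */
    #define Sig1(x)       (ROTR32(x, 6) ^ ROTR32(x, 11) ^ ROTR32(x, 25))  /* e1() above */

    /* One round in the "shift the registers" form; the kernel's unrolled code
     * gets the same effect by rotating which variable is called a..h. */
    static void sha256_round(uint32_t s[8], uint32_t k, uint32_t w)
    {
        uint32_t a = s[0], b = s[1], c = s[2], d = s[3];
        uint32_t e = s[4], f = s[5], g = s[6], h = s[7];
        uint32_t t1 = h + Sig1(e) + Ch(e, f, g) + k + w;
        uint32_t t2 = Sig0(a) + Maj(a, b, c);

        s[7] = g; s[6] = f; s[5] = e; s[4] = d + t1;
        s[3] = c; s[2] = b; s[1] = a; s[0] = t1 + t2;
    }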

/linux-2.4.37.9/fs/

D | binfmt_misc.c |
     64  Node *e = list_entry(l, Node, list);  in check_file() local
     68  if (!test_bit(Enabled, &e->flags))  in check_file()
     71  if (!test_bit(Magic, &e->flags)) {  in check_file()
     72  if (p && !strcmp(e->magic, p + 1))  in check_file()
     73  return e;  in check_file()
     77  s = bprm->buf + e->offset;  in check_file()
     78  if (e->mask) {  in check_file()
     79  for (j = 0; j < e->size; j++)  in check_file()
     80  if ((*s++ ^ e->magic[j]) & e->mask[j])  in check_file()
     83  for (j = 0; j < e->size; j++)  in check_file()
    [all …]
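
check_file() matches a registered binfmt_misc entry either by filename extension or by a magic byte sequence at a given offset, optionally under a mask so "don't care" bits in the header are ignored. A small sketch of the masked compare (the helper name and buffer are illustrative):

    #include <stddef.h>

    /* Return 1 if buf[offset..] matches 'magic' of 'size' bytes.  When 'mask'
     * is non-NULL, only bits set in mask[j] take part in the comparison, which
     * is what the (*s++ ^ e->magic[j]) & e->mask[j] test above computes. */
    static int magic_match(const unsigned char *buf, size_t offset,
                           const unsigned char *magic, const unsigned char *mask,
                           size_t size)
    {
        const unsigned char *s = buf + offset;
        size_t j;

        for (j = 0; j < size; j++) {
            unsigned char diff = s[j] ^ magic[j];   /* differing bits            */
            if (mask)
                diff &= mask[j];                    /* keep only the masked bits */
            if (diff)
                return 0;
        }
        return 1;
    }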

/linux-2.4.37.9/drivers/net/skfp/

D | ecm.c |
    109  smc->e.path_test = PT_PASSED ;
    110  smc->e.trace_prop = 0 ;
    111  smc->e.sb_flag = 0 ;
    113  smc->e.ecm_line_state = FALSE ;
    169  smc->e.DisconnectFlag = FALSE ;
    172  smc->e.DisconnectFlag = TRUE ;
    180  smc->e.path_test = PT_PASSED ;
    181  smc->e.ecm_line_state = FALSE ;
    188  && smc->e.path_test==PT_PASSED) {
    193  else if (cmd == EC_CONNECT && (smc->e.path_test==PT_PASSED) &&
    [all …]

/linux-2.4.37.9/lib/zlib_inflate/

D | inffast.c |
     36  uInt e;  /* extra bits or operation */  local
     60  if ((e = (t = tl + ((uInt)b & ml))->exop) == 0)
     69  if (e & 16)
     72  e &= 15;
     73  c = t->base + ((uInt)b & zlib_inflate_mask[e]);
     74  DUMPBITS(e)
     78  e = (t = td + ((uInt)b & md))->exop;
     81  if (e & 16)
     84  e &= 15;
     85  GRABBITS(e)  /* get extra bits (up to 13) */
    [all …]
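
The fast inflate loop keeps a bit accumulator: GRABBITS(n) tops it up until at least n bits are available, the low bits index the Huffman table, and DUMPBITS(n) discards the bits just consumed (zlib_inflate_mask[n] is simply (1<<n)-1). A stand-alone sketch of that bit-buffer discipline reading from a byte array (the names are mine, not zlib's):

    #include <stdint.h>
    #include <stddef.h>

    struct bitreader {
        const uint8_t *p, *end;  /* input bytes                          */
        uint32_t       b;        /* bit accumulator, LSB-first           */
        unsigned       k;        /* number of valid bits currently in b  */
    };

    /* GRABBITS(j): make sure at least j bits are buffered. */
    static void grabbits(struct bitreader *r, unsigned j)
    {
        while (r->k < j && r->p < r->end) {
            r->b |= (uint32_t)*r->p++ << r->k;  /* new byte above the existing bits */
            r->k += 8;
        }
    }

    /* Peek at the low n bits: the (uInt)b & mask[n] pattern above. */
    static uint32_t peekbits(const struct bitreader *r, unsigned n)
    {
        return r->b & ((1u << n) - 1);
    }

    /* DUMPBITS(j): throw away the bits that were just consumed. */
    static void dumpbits(struct bitreader *r, unsigned j)
    {
        r->b >>= j;
        r->k -= j;
    }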

/linux-2.4.37.9/drivers/isdn/eicon/

D | idi.c |
     23  static void request(card_t *card, ENTITY *e);
     25  static void req_0(ENTITY *e) { request(&DivasCards[ 0], e); }  in req_0() argument
     26  static void req_1(ENTITY *e) { request(&DivasCards[ 1], e); }  in req_1() argument
     27  static void req_2(ENTITY *e) { request(&DivasCards[ 2], e); }  in req_2() argument
     28  static void req_3(ENTITY *e) { request(&DivasCards[ 3], e); }  in req_3() argument
     29  static void req_4(ENTITY *e) { request(&DivasCards[ 4], e); }  in req_4() argument
     30  static void req_5(ENTITY *e) { request(&DivasCards[ 5], e); }  in req_5() argument
     31  static void req_6(ENTITY *e) { request(&DivasCards[ 6], e); }  in req_6() argument
     32  static void req_7(ENTITY *e) { request(&DivasCards[ 7], e); }  in req_7() argument
     33  static void req_8(ENTITY *e) { request(&DivasCards[ 8], e); }  in req_8() argument
    [all …]
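
The req_N() functions are hand-written trampolines: the callback signature takes only an ENTITY pointer and no context argument, so one wrapper per card bakes the card index into the function itself. A sketch of the same idea generated with a macro so the boilerplate is written once (types and names here are placeholders, not the Eicon driver's):

    #define NUM_CARDS 4

    typedef struct { int dummy; } entity_t;   /* placeholder for ENTITY        */
    typedef struct { int id; }    card_t;     /* placeholder for the card type */

    static card_t cards[NUM_CARDS];

    static void request(card_t *card, entity_t *e)
    {
        (void)card; (void)e;                  /* real work would go here */
    }

    /* One trampoline per card index: the callback has no user-data pointer,
     * so the index must be captured in the function body itself. */
    #define DEFINE_REQ(n) \
        static void req_##n(entity_t *e) { request(&cards[n], e); }

    DEFINE_REQ(0)
    DEFINE_REQ(1)
    DEFINE_REQ(2)
    DEFINE_REQ(3)

    /* Table used to hand the right trampoline to each card. */
    static void (*const req_tab[NUM_CARDS])(entity_t *) = { req_0, req_1, req_2, req_3 };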

/linux-2.4.37.9/lib/

D | inflate.c |
    131  uch e;  /* number of extra bits or operation */  member
    274  STATIC int huft_build(b, n, s, d, e, t, m)  in huft_build() argument
    279  const ush *e;  /* list of extra bits for non-simple codes */
    439  r.e = (uch)(16 + j);  /* bits in this table */
    451  r.e = 99;  /* out of values--invalid code */
    454  r.e = (uch)(*p < 256 ? 16 : 15);  /* 256 is end-of-block code */
    460  r.e = (uch)e[*p - s];  /* non-simple--look up in lists */
    521  register unsigned e;  /* table entry flag/number of extra bits */  local
    541  if ((e = (t = tl + ((unsigned)b & ml))->e) > 16)
    543  if (e == 99)
    [all …]

/linux-2.4.37.9/net/sched/

D | estimator.c |
    102  struct qdisc_estimator *e;  in est_timer() local
    105  for (e = elist[idx].list; e; e = e->next) {  in est_timer()
    106  struct tc_stats *st = e->stats;  in est_timer()
    114  rate = (nbytes - e->last_bytes)<<(7 - idx);  in est_timer()
    115  e->last_bytes = nbytes;  in est_timer()
    116  e->avbps += ((long)rate - (long)e->avbps) >> e->ewma_log;  in est_timer()
    117  st->bps = (e->avbps+0xF)>>5;  in est_timer()
    119  rate = (npackets - e->last_packets)<<(12 - idx);  in est_timer()
    120  e->last_packets = npackets;  in est_timer()
    121  e->avpps += ((long)rate - (long)e->avpps) >> e->ewma_log;  in est_timer()
    [all …]

/linux-2.4.37.9/drivers/net/sk98lin/h/

D | skgepnm2.h |
    204  #define SK_PNMI_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_STRUCT_DATA *)0)->e))  argument
    205  #define SK_PNMI_MAI_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_STRUCT_DATA *)0)->e))  argument
    206  #define SK_PNMI_VPD_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_VPD *)0)->e))  argument
    207  #define SK_PNMI_SEN_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_SENSOR *)0)->e))  argument
    208  #define SK_PNMI_CHK_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_CHECKSUM *)0)->e))  argument
    209  #define SK_PNMI_STA_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_STAT *)0)->e))  argument
    210  #define SK_PNMI_CNF_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_CONF *)0)->e))  argument
    211  #define SK_PNMI_RLM_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_RLMT *)0)->e))  argument
    212  #define SK_PNMI_MON_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_RLMT_MONITOR *)0)->e))  argument
    213  #define SK_PNMI_TRP_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_TRAP *)0)->e))  argument
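
Each SK_PNMI_*_OFF(e) macro is the classic hand-rolled offsetof: cast a null pointer to the structure type, take the address of member e, and read the result back as an integer. Standard C provides the same thing portably; a small sketch (the struct and member names are invented for illustration):

    #include <stddef.h>
    #include <stdio.h>

    struct pnmi_stat {
        unsigned int rx_ok;
        unsigned int tx_ok;
        unsigned int rx_err;
    };

    /* Hand-rolled form, as in the header above (works in practice, but taking
     * a member of a null pointer is formally undefined behaviour in ISO C). */
    #define MY_OFF(type, e)  ((unsigned long)&(((type *)0)->e))

    int main(void)
    {
        /* offsetof() from <stddef.h> is the portable spelling of the same idea. */
        printf("rx_err: macro %lu, offsetof %zu\n",
               MY_OFF(struct pnmi_stat, rx_err),
               offsetof(struct pnmi_stat, rx_err));
        return 0;
    }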

/linux-2.4.37.9/drivers/nubus/

D | proc.c |
     73  struct proc_dir_entry* e;  in nubus_proc_subdir() local
     76  e = create_proc_entry(name, S_IFREG | S_IRUGO |  in nubus_proc_subdir()
     78  if (!e) return;  in nubus_proc_subdir()
     94  struct proc_dir_entry* e;  in nubus_proc_populate() local
     98  e = proc_mkdir(name, parent);  in nubus_proc_populate()
     99  if (!e) return;  in nubus_proc_populate()
    108  nubus_proc_subdir(dev, e, &dir);  in nubus_proc_populate()
    115  struct proc_dir_entry *e;  in nubus_proc_attach_device() local
    134  e = dev->procdir = proc_mkdir(name, proc_bus_nubus_dir);  in nubus_proc_attach_device()
    135  if (!e)  in nubus_proc_attach_device()
    [all …]

/linux-2.4.37.9/include/asm-ia64/sn/

D | ioerror.h |
    163  #define IOERROR_INIT(e) do { (e)->ie_v.iev_a = 0; } while (0)  argument
    164  #define IOERROR_SETVALUE(e,f,v) do { (e)->ie_ ## f = (v); (e)->ie_v.iev_b.ievb_ ## f = 1; } while (…  argument
    165  #define IOERROR_FIELDVALID(e,f) ((unsigned long long)((e)->ie_v.iev_b.ievb_ ## f) != (unsigned long…  argument
    166  #define IOERROR_NOGETVALUE(e,f) (ASSERT(IOERROR_FIELDVALID(e,f)), ((e)->ie_ ## f))  argument
    167  #define IOERROR_GETVALUE(p,e,f) ASSERT(IOERROR_FIELDVALID(e,f)); p=((e)->ie_ ## f)  argument
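
The ioerror macros pair every field with a one-bit "this field was filled in" flag, using token pasting so IOERROR_SETVALUE(e, f, v) stores the value and sets the matching valid bit in one step, and the getters assert validity before reading. A cut-down sketch of the pattern with just two fields (the field names are invented):

    #include <assert.h>

    /* Each field has a payload member (ie_<f>) and a matching valid bit (ievb_<f>). */
    struct ioerr {
        struct {
            unsigned ievb_widget : 1;
            unsigned ievb_addr   : 1;
        } valid;
        unsigned long ie_widget;
        unsigned long ie_addr;
    };

    #define IOERR_INIT(e)       do { (e)->valid.ievb_widget = 0;            \
                                     (e)->valid.ievb_addr   = 0; } while (0)
    #define IOERR_SET(e, f, v)  do { (e)->ie_ ## f = (v);                   \
                                     (e)->valid.ievb_ ## f = 1; } while (0)
    #define IOERR_VALID(e, f)   ((e)->valid.ievb_ ## f != 0)
    #define IOERR_GET(e, f)     (assert(IOERR_VALID(e, f)), (e)->ie_ ## f)

    /* Usage: only fields that were explicitly set may be read back. */
    static unsigned long example(void)
    {
        struct ioerr e;
        IOERR_INIT(&e);
        IOERR_SET(&e, addr, 0xdeadbeefUL);
        return IOERR_GET(&e, addr);      /* IOERR_GET(&e, widget) would assert */
    }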

/linux-2.4.37.9/net/ipv4/netfilter/

D | arp_tables.c |
    254  struct arpt_entry *e, *back;  in arpt_do_table() local
    265  e = get_entry(table_base, table->private->hook_entry[hook]);  in arpt_do_table()
    269  if (arp_packet_match(arp, (*pskb)->dev, indev, outdev, &e->arp)) {  in arpt_do_table()
    275  ADD_COUNTER(e->counters, hdr_len, 1);  in arpt_do_table()
    277  t = arpt_get_target(e);  in arpt_do_table()
    290  e = back;  in arpt_do_table()
    296  != (void *)e + e->next_offset) {  in arpt_do_table()
    299  = (void *)e + e->next_offset;  in arpt_do_table()
    307  e = get_entry(table_base, v);  in arpt_do_table()
    322  e = (void *)e + e->next_offset;  in arpt_do_table()
    [all …]

D | ip_tables.c |
    271  struct ipt_entry *e, *back;  in ipt_do_table() local
    292  e = get_entry(table_base, table->private->hook_entry[hook]);  in ipt_do_table()
    311  IP_NF_ASSERT(e);  in ipt_do_table()
    313  (*pskb)->nfcache |= e->nfcache;  in ipt_do_table()
    314  if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {  in ipt_do_table()
    317  if (IPT_MATCH_ITERATE(e, do_match,  in ipt_do_table()
    323  ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);  in ipt_do_table()
    325  t = ipt_get_target(e);  in ipt_do_table()
    338  e = back;  in ipt_do_table()
    344  != (void *)e + e->next_offset) {  in ipt_do_table()
    [all …]
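
arpt_do_table(), ipt_do_table() and the ip6t_do_table() walker further down all traverse the same rule layout: rules are variable-sized records packed back to back in one blob, each carrying a next_offset, so stepping to the next rule is pointer arithmetic rather than a linked list. A sketch of that traversal over a byte blob (the entry struct here is a stand-in, not the real ipt_entry):

    #include <stdint.h>
    #include <stddef.h>

    /* Stand-in for a packed rule: a fixed header followed by variable-length
     * match/target data, with next_offset giving the total size of the record. */
    struct entry {
        uint16_t next_offset;    /* bytes from this entry to the next one */
        uint16_t verdict;        /* what to do if the rule matches        */
        /* variable-length payload follows */
    };

    /* Walk every rule in the blob, calling cb() on each; mirrors the
     * e = (void *)e + e->next_offset step in the do_table loops above. */
    static void walk_rules(void *table_base, size_t size,
                           void (*cb)(const struct entry *))
    {
        char *p   = table_base;
        char *end = p + size;

        while (p + sizeof(struct entry) <= end) {
            struct entry *e = (struct entry *)p;
            if (e->next_offset < sizeof(struct entry))
                break;                       /* malformed: would loop forever */
            cb(e);
            p += e->next_offset;             /* next variable-sized record    */
        }
    }

In the real walkers, jumps to user-defined chains reuse the same blob through get_entry(table_base, offset), with the 'back' pointer recording where to resume.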

/linux-2.4.37.9/include/asm-i386/

D | pgtable-3level.h |
     29  #define pte_ERROR(e) \  argument
     30  printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
     31  #define pmd_ERROR(e) \  argument
     32  printk("%s:%d: bad pmd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
     33  #define pgd_ERROR(e) \  argument
     34  printk("%s:%d: bad pgd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))

/linux-2.4.37.9/net/ipv6/netfilter/

D | ip6_tables.c |
    360  struct ip6t_entry *e, *back;  in ip6t_do_table() local
    378  e = get_entry(table_base, table->private->hook_entry[hook]);  in ip6t_do_table()
    397  IP_NF_ASSERT(e);  in ip6t_do_table()
    399  (*pskb)->nfcache |= e->nfcache;  in ip6t_do_table()
    400  if (ip6_packet_match(*pskb, indev, outdev, &e->ipv6,  in ip6t_do_table()
    404  if (IP6T_MATCH_ITERATE(e, do_match,  in ip6t_do_table()
    413  ADD_COUNTER(e->counters, ntohs((*pskb)->nh.ipv6h->payload_len) + IPV6_HDR_LEN, 1);  in ip6t_do_table()
    415  t = ip6t_get_target(e);  in ip6t_do_table()
    428  e = back;  in ip6t_do_table()
    434  != (void *)e + e->next_offset) {  in ip6t_do_table()
    [all …]

/linux-2.4.37.9/net/ipv4/

D | ipconfig.c |
    547  u8 *e = options;  in ic_dhcp_init_options() local
    553  memcpy(e, ic_bootp_cookie, 4);  /* RFC1048 Magic Cookie */  in ic_dhcp_init_options()
    554  e += 4;  in ic_dhcp_init_options()
    556  *e++ = 53;  /* DHCP message type */  in ic_dhcp_init_options()
    557  *e++ = 1;  in ic_dhcp_init_options()
    558  *e++ = mt;  in ic_dhcp_init_options()
    561  *e++ = 54;  /* Server ID (IP address) */  in ic_dhcp_init_options()
    562  *e++ = 4;  in ic_dhcp_init_options()
    563  memcpy(e, &ic_servaddr, 4);  in ic_dhcp_init_options()
    564  e += 4;  in ic_dhcp_init_options()
    [all …]
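
ic_dhcp_init_options() builds the DHCP options field with a byte cursor: the 4-byte magic cookie first, then tag/length/value triples (53 = message type, 54 = server identifier, per RFC 2132). A small helper wrapping the same cursor arithmetic; the helper names and buffer handling are illustrative, and the server address is assumed to already be in network byte order:

    #include <stdint.h>
    #include <string.h>

    /* Append one tag/length/value DHCP option at *e and return the new cursor. */
    static uint8_t *dhcp_put_option(uint8_t *e, uint8_t tag,
                                    const void *val, uint8_t len)
    {
        *e++ = tag;                 /* option code             */
        *e++ = len;                 /* payload length in bytes */
        memcpy(e, val, len);        /* payload                 */
        return e + len;
    }

    static size_t build_options(uint8_t *options, uint8_t msg_type, uint32_t server_ip)
    {
        static const uint8_t cookie[4] = { 99, 130, 83, 99 };  /* RFC 1048 magic cookie */
        uint8_t *e = options;

        memcpy(e, cookie, 4);
        e += 4;
        e = dhcp_put_option(e, 53, &msg_type, 1);     /* DHCP message type  */
        e = dhcp_put_option(e, 54, &server_ip, 4);    /* server identifier  */
        *e++ = 255;                                   /* end-of-options tag */
        return (size_t)(e - options);
    }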

/linux-2.4.37.9/include/asm-arm/

D | cpu-multi32.h |
    139  #define cpu_cache_clean_invalidate_range(s,e,f) processor.cache.clean_invalidate_range(s,e,f)  argument
    144  #define cpu_dcache_clean_range(s,e) processor.dcache.clean_range(s,e)  argument
    145  #define cpu_dcache_invalidate_range(s,e) processor.dcache.invalidate_range(s,e)  argument
    147  #define cpu_icache_invalidate_range(s,e) processor.icache.invalidate_range(s,e)  argument
    151  #define cpu_tlb_invalidate_range(s,e) processor.tlb.invalidate_range(s,e)  argument
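
cpu-multi32.h is the multi-CPU build of these helpers: each cpu_*() operation is a macro that indirects through a global 'processor' structure of function pointers, filled in at boot for whichever core was detected (the single-CPU header would map the same macros to direct calls instead). A stripped-down sketch of that dispatch structure; the ops shown and the arm926 names are invented for illustration:

    /* Per-CPU operations, selected once at startup. */
    struct cache_ops {
        void (*clean_range)(unsigned long start, unsigned long end);
        void (*invalidate_range)(unsigned long start, unsigned long end);
    };

    struct processor_ops {
        struct cache_ops dcache;
        struct cache_ops icache;
    };

    /* The one global instance every macro indirects through. */
    static struct processor_ops processor;

    #define cpu_dcache_clean_range(s, e)      processor.dcache.clean_range(s, e)
    #define cpu_icache_invalidate_range(s, e) processor.icache.invalidate_range(s, e)

    /* Boot-time selection: point the table at this core's implementation. */
    static void arm926_dcache_clean(unsigned long s, unsigned long e)      { (void)s; (void)e; }
    static void arm926_dcache_invalidate(unsigned long s, unsigned long e) { (void)s; (void)e; }
    static void arm926_icache_invalidate(unsigned long s, unsigned long e) { (void)s; (void)e; }

    static void setup_processor(void)
    {
        processor.dcache.clean_range      = arm926_dcache_clean;
        processor.dcache.invalidate_range = arm926_dcache_invalidate;
        processor.icache.invalidate_range = arm926_icache_invalidate;
    }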