/linux-2.6.39/arch/powerpc/kernel/ |
D | iommu.c |
    61   struct iommu_table *tbl,   in iommu_range_alloc() argument
    88   start = largealloc ? tbl->it_largehint : tbl->it_hint;   in iommu_range_alloc()
    91   limit = largealloc ? tbl->it_size : tbl->it_halfpoint;   in iommu_range_alloc()
    93   if (largealloc && start < tbl->it_halfpoint)   in iommu_range_alloc()
    94   start = tbl->it_halfpoint;   in iommu_range_alloc()
    101  start = largealloc ? tbl->it_largehint : tbl->it_hint;   in iommu_range_alloc()
    105  if (limit + tbl->it_offset > mask) {   in iommu_range_alloc()
    106  limit = mask - tbl->it_offset + 1;   in iommu_range_alloc()
    124  n = iommu_area_alloc(tbl->it_map, limit, start, npages,   in iommu_range_alloc()
    125  tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,   in iommu_range_alloc()
    [all …]
|
D | dma-iommu.c |
    75   struct iommu_table *tbl = get_iommu_table_base(dev);   in dma_iommu_dma_supported() local
    77   if (!tbl) {   in dma_iommu_dma_supported()
    83   if ((tbl->it_offset + tbl->it_size) > (mask >> IOMMU_PAGE_SHIFT)) {   in dma_iommu_dma_supported()
    86   mask, (tbl->it_offset + tbl->it_size) <<   in dma_iommu_dma_supported()
|
/linux-2.6.39/drivers/net/wireless/iwlwifi/ |
D | iwl-agn-rs.c |
    430  static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)   in get_expected_tpt() argument
    432  if (tbl->expected_tpt)   in get_expected_tpt()
    433  return tbl->expected_tpt[rs_index];   in get_expected_tpt()
    444  static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,   in rs_collect_tx_data() argument
    455  window = &(tbl->win[scale_index]);   in rs_collect_tx_data()
    458  tpt = get_expected_tpt(tbl, scale_index);   in rs_collect_tx_data()
    523  struct iwl_scale_tbl_info *tbl,   in rate_n_flags_from_tbl() argument
    528  if (is_legacy(tbl->lq_type)) {   in rate_n_flags_from_tbl()
    533  } else if (is_Ht(tbl->lq_type)) {   in rate_n_flags_from_tbl()
    540  if (is_siso(tbl->lq_type))   in rate_n_flags_from_tbl()
    [all …]
|
D | iwl-agn-rs.h |
    314  #define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))   argument
    315  #define is_siso(tbl) ((tbl) == LQ_SISO)   argument
    316  #define is_mimo2(tbl) ((tbl) == LQ_MIMO2)   argument
    317  #define is_mimo3(tbl) ((tbl) == LQ_MIMO3)   argument
    318  #define is_mimo(tbl) (is_mimo2(tbl) || is_mimo3(tbl))   argument
    319  #define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))   argument
    320  #define is_a_band(tbl) ((tbl) == LQ_A)   argument
    321  #define is_g_and(tbl) ((tbl) == LQ_G)   argument
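
Note: the LQ_* checks above are normally applied to the lq_type field of a rate-scale table, as the rate_n_flags_from_tbl() snippet earlier in this listing shows. A minimal sketch of that usage, assuming the iwl-agn-rs.h definitions are in scope; tbl_num_streams() is an illustrative helper, not a function from the driver:

    /* Illustrative only: classify a rate-scale table by its lq_type using the
     * macros listed above. */
    static int tbl_num_streams(struct iwl_scale_tbl_info *tbl)
    {
        if (is_legacy(tbl->lq_type) || is_siso(tbl->lq_type))
            return 1;               /* legacy and SISO rates carry one spatial stream */
        if (is_mimo2(tbl->lq_type))
            return 2;               /* two spatial streams */
        if (is_mimo3(tbl->lq_type))
            return 3;               /* three spatial streams */
        return 0;                   /* not a recognised lq_type */
    }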
|
/linux-2.6.39/net/core/ |
D | neighbour.c |
    61   static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
    129  static int neigh_forced_gc(struct neigh_table *tbl)   in neigh_forced_gc() argument
    135  NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);   in neigh_forced_gc()
    137  write_lock_bh(&tbl->lock);   in neigh_forced_gc()
    138  nht = rcu_dereference_protected(tbl->nht,   in neigh_forced_gc()
    139  lockdep_is_held(&tbl->lock));   in neigh_forced_gc()
    146  lockdep_is_held(&tbl->lock))) != NULL) {   in neigh_forced_gc()
    156  lockdep_is_held(&tbl->lock)));   in neigh_forced_gc()
    168  tbl->last_flush = jiffies;   in neigh_forced_gc()
    170  write_unlock_bh(&tbl->lock);   in neigh_forced_gc()
    [all …]
|
D | sysctl_net_core.c |
    198  struct ctl_table *tbl;   in sysctl_core_net_init() local
    202  tbl = netns_core_table;   in sysctl_core_net_init()
    204  tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);   in sysctl_core_net_init()
    205  if (tbl == NULL)   in sysctl_core_net_init()
    208  tbl[0].data = &net->core.sysctl_somaxconn;   in sysctl_core_net_init()
    212  net_core_path, tbl);   in sysctl_core_net_init()
    219  if (tbl != netns_core_table)   in sysctl_core_net_init()
    220  kfree(tbl);   in sysctl_core_net_init()
    227  struct ctl_table *tbl;   in sysctl_core_net_exit() local
    229  tbl = net->core.sysctl_hdr->ctl_table_arg;   in sysctl_core_net_exit()
    [all …]
|
/linux-2.6.39/arch/x86/kernel/ |
D | pci-calgary_64.c |
    173  static void calgary_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev);
    174  static void calgary_tce_cache_blast(struct iommu_table *tbl);
    175  static void calgary_dump_error_regs(struct iommu_table *tbl);
    176  static void calioc2_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev);
    177  static void calioc2_tce_cache_blast(struct iommu_table *tbl);
    178  static void calioc2_dump_error_regs(struct iommu_table *tbl);
    179  static void calgary_init_bitmap_from_tce_table(struct iommu_table *tbl);
    196  static inline int translation_enabled(struct iommu_table *tbl)   in translation_enabled() argument
    199  return (tbl != NULL);   in translation_enabled()
    202  static void iommu_range_reserve(struct iommu_table *tbl,   in iommu_range_reserve() argument
    [all …]
|
D | tce_64.c |
    48   void tce_build(struct iommu_table *tbl, unsigned long index,   in tce_build() argument
    59   tp = ((u64*)tbl->it_base) + index;   in tce_build()
    74   void tce_free(struct iommu_table *tbl, long index, unsigned int npages)   in tce_free() argument
    78   tp = ((u64*)tbl->it_base) + index;   in tce_free()
    97   static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl)   in tce_table_setparms() argument
    103  tbl->it_busno = dev->bus->number;   in tce_table_setparms()
    106  tbl->it_size = table_size_to_number_of_entries(specified_table_size);   in tce_table_setparms()
    112  bitmapsz = tbl->it_size / BITS_PER_BYTE;   in tce_table_setparms()
    120  tbl->it_map = (unsigned long*)bmppages;   in tce_table_setparms()
    122  memset(tbl->it_map, 0, bitmapsz);   in tce_table_setparms()
    [all …]
|
/linux-2.6.39/net/netfilter/ipvs/ |
D | ip_vs_lblc.c |
    164  ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en)   in ip_vs_lblc_hash() argument
    168  list_add(&en->list, &tbl->bucket[hash]);   in ip_vs_lblc_hash()
    169  atomic_inc(&tbl->entries);   in ip_vs_lblc_hash()
    178  ip_vs_lblc_get(int af, struct ip_vs_lblc_table *tbl,   in ip_vs_lblc_get() argument
    184  list_for_each_entry(en, &tbl->bucket[hash], list)   in ip_vs_lblc_get()
    197  ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr,   in ip_vs_lblc_new() argument
    202  en = ip_vs_lblc_get(dest->af, tbl, daddr);   in ip_vs_lblc_new()
    217  ip_vs_lblc_hash(tbl, en);   in ip_vs_lblc_new()
    231  static void ip_vs_lblc_flush(struct ip_vs_lblc_table *tbl)   in ip_vs_lblc_flush() argument
    237  list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) {   in ip_vs_lblc_flush()
    [all …]
|
D | ip_vs_dh.c |
    88   ip_vs_dh_get(int af, struct ip_vs_dh_bucket *tbl,   in ip_vs_dh_get() argument
    91   return (tbl[ip_vs_dh_hashkey(af, addr)]).dest;   in ip_vs_dh_get()
    99   ip_vs_dh_assign(struct ip_vs_dh_bucket *tbl, struct ip_vs_service *svc)   in ip_vs_dh_assign() argument
    106  b = tbl;   in ip_vs_dh_assign()
    130  static void ip_vs_dh_flush(struct ip_vs_dh_bucket *tbl)   in ip_vs_dh_flush() argument
    135  b = tbl;   in ip_vs_dh_flush()
    148  struct ip_vs_dh_bucket *tbl;   in ip_vs_dh_init_svc() local
    151  tbl = kmalloc(sizeof(struct ip_vs_dh_bucket)*IP_VS_DH_TAB_SIZE,   in ip_vs_dh_init_svc()
    153  if (tbl == NULL) {   in ip_vs_dh_init_svc()
    157  svc->sched_data = tbl;   in ip_vs_dh_init_svc()
    [all …]
|
D | ip_vs_sh.c |
    85   ip_vs_sh_get(int af, struct ip_vs_sh_bucket *tbl,   in ip_vs_sh_get() argument
    88   return (tbl[ip_vs_sh_hashkey(af, addr)]).dest;   in ip_vs_sh_get()
    96   ip_vs_sh_assign(struct ip_vs_sh_bucket *tbl, struct ip_vs_service *svc)   in ip_vs_sh_assign() argument
    103  b = tbl;   in ip_vs_sh_assign()
    127  static void ip_vs_sh_flush(struct ip_vs_sh_bucket *tbl)   in ip_vs_sh_flush() argument
    132  b = tbl;   in ip_vs_sh_flush()
    145  struct ip_vs_sh_bucket *tbl;   in ip_vs_sh_init_svc() local
    148  tbl = kmalloc(sizeof(struct ip_vs_sh_bucket)*IP_VS_SH_TAB_SIZE,   in ip_vs_sh_init_svc()
    150  if (tbl == NULL) {   in ip_vs_sh_init_svc()
    154  svc->sched_data = tbl;   in ip_vs_sh_init_svc()
    [all …]
|
D | ip_vs_lblcr.c |
    335  ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en)   in ip_vs_lblcr_hash() argument
    339  list_add(&en->list, &tbl->bucket[hash]);   in ip_vs_lblcr_hash()
    340  atomic_inc(&tbl->entries);   in ip_vs_lblcr_hash()
    349  ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl,   in ip_vs_lblcr_get() argument
    355  list_for_each_entry(en, &tbl->bucket[hash], list)   in ip_vs_lblcr_get()
    368  ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr,   in ip_vs_lblcr_new() argument
    373  en = ip_vs_lblcr_get(dest->af, tbl, daddr);   in ip_vs_lblcr_new()
    390  ip_vs_lblcr_hash(tbl, en);   in ip_vs_lblcr_new()
    404  static void ip_vs_lblcr_flush(struct ip_vs_lblcr_table *tbl)   in ip_vs_lblcr_flush() argument
    411  list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) {   in ip_vs_lblcr_flush()
    [all …]
|
/linux-2.6.39/drivers/net/wireless/iwlegacy/ |
D | iwl-4965-rs.c |
    405  iwl4965_get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)   in iwl4965_get_expected_tpt() argument
    407  if (tbl->expected_tpt)   in iwl4965_get_expected_tpt()
    408  return tbl->expected_tpt[rs_index];   in iwl4965_get_expected_tpt()
    419  static int iwl4965_rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,   in iwl4965_rs_collect_tx_data() argument
    430  window = &(tbl->win[scale_index]);   in iwl4965_rs_collect_tx_data()
    433  tpt = iwl4965_get_expected_tpt(tbl, scale_index);   in iwl4965_rs_collect_tx_data()
    497  struct iwl_scale_tbl_info *tbl,   in iwl4965_rate_n_flags_from_tbl() argument
    502  if (is_legacy(tbl->lq_type)) {   in iwl4965_rate_n_flags_from_tbl()
    507  } else if (is_Ht(tbl->lq_type)) {   in iwl4965_rate_n_flags_from_tbl()
    514  if (is_siso(tbl->lq_type))   in iwl4965_rate_n_flags_from_tbl()
    [all …]
|
D | iwl-legacy-rs.h |
    282  #define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))   argument
    283  #define is_siso(tbl) ((tbl) == LQ_SISO)   argument
    284  #define is_mimo2(tbl) ((tbl) == LQ_MIMO2)   argument
    285  #define is_mimo(tbl) (is_mimo2(tbl))   argument
    286  #define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))   argument
    287  #define is_a_band(tbl) ((tbl) == LQ_A)   argument
    288  #define is_g_and(tbl) ((tbl) == LQ_G)   argument
|
/linux-2.6.39/net/netfilter/ |
D | xt_repldata.h |
    16   } *tbl = kzalloc(sizeof(*tbl), GFP_KERNEL); \
    17   if (tbl == NULL) \
    19   strncpy(tbl->repl.name, info->name, sizeof(tbl->repl.name)); \
    20   tbl->term = (struct type##_error)typ2##_ERROR_INIT; \
    21   tbl->repl.valid_hooks = hook_mask; \
    22   tbl->repl.num_entries = nhooks + 1; \
    23   tbl->repl.size = nhooks * sizeof(struct type##_standard) + \
    28   tbl->repl.hook_entry[hooknum] = bytes; \
    29   tbl->repl.underflow[hooknum] = bytes; \
    30   tbl->entries[i++] = (struct type##_standard) \
    [all …]
|
/linux-2.6.39/arch/powerpc/platforms/iseries/ |
D | iommu.c |
    45   static int tce_build_iSeries(struct iommu_table *tbl, long index, long npages,   in tce_build_iSeries() argument
    56   if (tbl->it_type == TCE_VB) {   in tce_build_iSeries()
    68   rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, tce);   in tce_build_iSeries()
    78   static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages)   in tce_free_iSeries() argument
    83   rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, 0);   in tce_free_iSeries()
    120  struct iommu_table* tbl)   in iommu_table_getparms_iSeries() argument
    138  tbl->it_size = (parms->itc_size * TCE_PAGE_SIZE) / TCE_ENTRY_SIZE;   in iommu_table_getparms_iSeries()
    139  tbl->it_busno = parms->itc_busno;   in iommu_table_getparms_iSeries()
    140  tbl->it_offset = parms->itc_offset;   in iommu_table_getparms_iSeries()
    141  tbl->it_index = parms->itc_index;   in iommu_table_getparms_iSeries()
    [all …]
|
/linux-2.6.39/include/net/ |
D | neighbour.h |
    48   struct neigh_table *tbl;   member
    91   #define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)   argument
    95   struct neigh_table *tbl;   member
    190  extern void neigh_table_init(struct neigh_table *tbl);
    191  extern void neigh_table_init_no_netlink(struct neigh_table *tbl);
    192  extern int neigh_table_clear(struct neigh_table *tbl);
    193  extern struct neighbour * neigh_lookup(struct neigh_table *tbl,
    196  extern struct neighbour * neigh_lookup_nodev(struct neigh_table *tbl,
    199  extern struct neighbour * neigh_create(struct neigh_table *tbl,
    206  extern void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
    [all …]
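
Note: the declarations above form the lookup side of the neighbour-cache API. A minimal usage sketch, assuming an ARP lookup against the global arp_tbl; peer_is_resolved() and its caller are hypothetical, not part of the header:

    #include <net/neighbour.h>
    #include <net/arp.h>

    /* Hypothetical helper: report whether an IPv4 peer already has a usable
     * ARP entry. neigh_lookup() returns a referenced entry (or NULL), so the
     * reference must be dropped with neigh_release(). */
    static bool peer_is_resolved(struct net_device *dev, __be32 ip)
    {
        struct neighbour *n = neigh_lookup(&arp_tbl, &ip, dev);
        bool resolved = false;

        if (n) {
            resolved = !!(n->nud_state & NUD_VALID);
            neigh_release(n);
        }
        return resolved;
    }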
|
/linux-2.6.39/arch/powerpc/platforms/pseries/ |
D | iommu.c |
    54   static int tce_build_pSeries(struct iommu_table *tbl, long index,   in tce_build_pSeries() argument
    68   tcep = ((u64 *)tbl->it_base) + index;   in tce_build_pSeries()
    82   static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)   in tce_free_pSeries() argument
    86   tcep = ((u64 *)tbl->it_base) + index;   in tce_free_pSeries()
    92   static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)   in tce_get_pseries() argument
    96   tcep = ((u64 *)tbl->it_base) + index;   in tce_get_pseries()
    104  static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,   in tce_build_pSeriesLP() argument
    122  rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, tce);   in tce_build_pSeriesLP()
    126  tce_free_pSeriesLP(tbl, tcenum_start,   in tce_build_pSeriesLP()
    133  printk("\tindex = 0x%llx\n", (u64)tbl->it_index);   in tce_build_pSeriesLP()
    [all …]
|
/linux-2.6.39/scripts/dtc/ |
D | livetree.c |
    495  struct reserve_info *ri, **tbl;   in sort_reserve_entries() local
    506  tbl = xmalloc(n * sizeof(*tbl));   in sort_reserve_entries()
    511  tbl[i++] = ri;   in sort_reserve_entries()
    513  qsort(tbl, n, sizeof(*tbl), cmp_reserve_info);   in sort_reserve_entries()
    515  bi->reservelist = tbl[0];   in sort_reserve_entries()
    517  tbl[i]->next = tbl[i+1];   in sort_reserve_entries()
    518  tbl[n-1]->next = NULL;   in sort_reserve_entries()
    520  free(tbl);   in sort_reserve_entries()
    536  struct property *prop, **tbl;   in sort_properties() local
    544  tbl = xmalloc(n * sizeof(*tbl));   in sort_properties()
    [all …]
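
Note: sort_reserve_entries() and sort_properties() above share one idiom: copy the list nodes into a temporary pointer array, qsort() it, then relink the list in sorted order. A generic, self-contained sketch of that idiom; struct node and node_cmp() are illustrative stand-ins, not dtc types:

    #include <stdlib.h>

    struct node {
        int key;
        struct node *next;
    };

    /* qsort comparator over an array of struct node pointers. */
    static int node_cmp(const void *a, const void *b)
    {
        const struct node *na = *(const struct node * const *)a;
        const struct node *nb = *(const struct node * const *)b;
        return na->key - nb->key;
    }

    static struct node *sort_list(struct node *head)
    {
        size_t n = 0, i;
        struct node *p, **tbl;

        for (p = head; p; p = p->next)          /* count the nodes */
            n++;
        if (n < 2)
            return head;

        tbl = malloc(n * sizeof(*tbl));
        if (!tbl)
            return head;                        /* leave unsorted on OOM */

        for (i = 0, p = head; p; p = p->next)   /* collect node pointers */
            tbl[i++] = p;
        qsort(tbl, n, sizeof(*tbl), node_cmp);

        for (i = 0; i < n - 1; i++)             /* relink in sorted order */
            tbl[i]->next = tbl[i + 1];
        tbl[n - 1]->next = NULL;
        head = tbl[0];

        free(tbl);
        return head;
    }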
|
/linux-2.6.39/arch/unicore32/mm/ |
D | proc-macros.S |
    97   .macro va2pa, va, pa, tbl, msk, off, err=990f
    100  adr \tbl, 910f   @ tbl <- table of 1st page table
    107  add \tbl, \tbl, \off << #3   @ cmove table pointer
    108  ldw \msk, [\tbl+], #0   @ get the mask
    109  ldw pc, [\tbl+], #4
    113  cntlo \tbl, \msk   @ use tbl as temp reg
    114  mov \off, \off >> \tbl
    116  adr \tbl, 920f   @ tbl <- table of 2nd pt
    130  andn \tbl, \va, \msk
    132  or \pa, \pa, \tbl
|
/linux-2.6.39/drivers/staging/cxt1e1/ |
D | sbecrc.c |
    93   u_int32_t *tbl = 0;   in sbeCrc() local
    104  tbl = &CRCTable;   in sbeCrc()
    105  genCrcTable (tbl);   in sbeCrc()
    107  tbl = (u_int32_t *) OS_kmalloc (CRC_TABLE_ENTRIES * sizeof (u_int32_t));   in sbeCrc()
    108  if (tbl == 0)   in sbeCrc()
    114  genCrcTable (tbl);   in sbeCrc()
    123  temp2 = tbl[((int) crc ^ *buffer++) & 0xff];   in sbeCrc()
    133  OS_kfree (tbl);   in sbeCrc()
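
Note: sbeCrc() above drives a 256-entry lookup table indexed by (crc ^ *buffer++) & 0xff. A generic table-driven CRC-32 sketch of the same lookup pattern, assuming the common reflected polynomial 0xEDB88320; the driver's actual polynomial, initial and final values may differ:

    #include <stdint.h>
    #include <stddef.h>

    static uint32_t crc_table[256];

    /* Fill the lookup table; call once before crc32_update(). */
    static void gen_crc_table(void)
    {
        for (uint32_t i = 0; i < 256; i++) {
            uint32_t c = i;
            for (int k = 0; k < 8; k++)
                c = (c & 1) ? (c >> 1) ^ 0xEDB88320u : c >> 1;
            crc_table[i] = c;
        }
    }

    /* One table lookup per byte; chaining calls yields the same result as
     * one call over the concatenated buffers. */
    static uint32_t crc32_update(uint32_t crc, const unsigned char *buf, size_t len)
    {
        crc = ~crc;
        while (len--)
            crc = crc_table[(crc ^ *buf++) & 0xff] ^ (crc >> 8);
        return ~crc;
    }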
|
/linux-2.6.39/net/mac80211/ |
D | mesh_pathtbl.c |
    43   static void __mesh_table_free(struct mesh_table *tbl)   in __mesh_table_free() argument
    45   kfree(tbl->hash_buckets);   in __mesh_table_free()
    46   kfree(tbl->hashwlock);   in __mesh_table_free()
    47   kfree(tbl);   in __mesh_table_free()
    50   void mesh_table_free(struct mesh_table *tbl, bool free_leafs)   in mesh_table_free() argument
    56   mesh_hash = tbl->hash_buckets;   in mesh_table_free()
    57   for (i = 0; i <= tbl->hash_mask; i++) {   in mesh_table_free()
    58   spin_lock(&tbl->hashwlock[i]);   in mesh_table_free()
    60   tbl->free_node(p, free_leafs);   in mesh_table_free()
    61   atomic_dec(&tbl->entries);   in mesh_table_free()
    [all …]
|
/linux-2.6.39/arch/powerpc/include/asm/ |
D | iommu.h |
    84   extern void iommu_free_table(struct iommu_table *tbl, const char *node_name);
    89   extern struct iommu_table *iommu_init_table(struct iommu_table * tbl,
    92   extern int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
    96   extern void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
    100  extern void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
    103  extern void iommu_free_coherent(struct iommu_table *tbl, size_t size,
    105  extern dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
    110  extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
|
/linux-2.6.39/lib/ |
D | devres.c |
    209  void __iomem **tbl;   in pcim_iomap() local
    213  tbl = (void __iomem **)pcim_iomap_table(pdev);   in pcim_iomap()
    214  if (!tbl || tbl[bar]) /* duplicate mappings not allowed */   in pcim_iomap()
    217  tbl[bar] = pci_iomap(pdev, bar, maxlen);   in pcim_iomap()
    218  return tbl[bar];   in pcim_iomap()
    231  void __iomem **tbl;   in pcim_iounmap() local
    236  tbl = (void __iomem **)pcim_iomap_table(pdev);   in pcim_iounmap()
    237  BUG_ON(!tbl);   in pcim_iounmap()
    240  if (tbl[i] == addr) {   in pcim_iounmap()
    241  tbl[i] = NULL;   in pcim_iounmap()
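
Note: pcim_iomap() above records each BAR mapping in the per-device table returned by pcim_iomap_table(), so the mapping is torn down automatically by devres when the driver unbinds. A minimal probe sketch of that managed-mapping pattern; the function name, BAR index and error handling are placeholders:

    #include <linux/pci.h>

    /* Hypothetical PCI probe using the managed helpers shown above. */
    static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
        void __iomem *regs;
        int rc;

        rc = pcim_enable_device(pdev);  /* managed enable: undone on unbind */
        if (rc)
            return rc;

        regs = pcim_iomap(pdev, 0, 0);  /* map all of BAR 0 */
        if (!regs)
            return -ENOMEM;

        /* regs stays valid until the driver detaches; an explicit
         * pcim_iounmap(pdev, regs) is only needed for early release. */
        return 0;
    }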
|
/linux-2.6.39/net/ipv4/ |
D | sysctl_net_ipv4.c |
    75   ctl_table tbl = {   in proc_tcp_congestion_control() local
    83   ret = proc_dostring(&tbl, write, buffer, lenp, ppos);   in proc_tcp_congestion_control()
    94   ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };   in proc_tcp_available_congestion_control() local
    97   tbl.data = kmalloc(tbl.maxlen, GFP_USER);   in proc_tcp_available_congestion_control()
    98   if (!tbl.data)   in proc_tcp_available_congestion_control()
    100  tcp_get_available_congestion_control(tbl.data, TCP_CA_BUF_MAX);   in proc_tcp_available_congestion_control()
    101  ret = proc_dostring(&tbl, write, buffer, lenp, ppos);   in proc_tcp_available_congestion_control()
    102  kfree(tbl.data);   in proc_tcp_available_congestion_control()
    111  ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };   in proc_allowed_congestion_control() local
    114  tbl.data = kmalloc(tbl.maxlen, GFP_USER);   in proc_allowed_congestion_control()
    [all …]
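
Note: each handler above builds a temporary ctl_table on the stack and lets proc_dostring() perform the user-space copy. A minimal sketch of the same pattern for a hypothetical string sysctl; my_string and proc_my_string are placeholders, not kernel symbols:

    #include <linux/sysctl.h>

    static char my_string[64] = "default";

    /* Placeholder proc handler: point a stack ctl_table at the real storage
     * and let proc_dostring() handle both reads and writes. */
    static int proc_my_string(ctl_table *ctl, int write,
                              void __user *buffer, size_t *lenp, loff_t *ppos)
    {
        ctl_table tbl = {
            .data   = my_string,
            .maxlen = sizeof(my_string),
        };

        return proc_dostring(&tbl, write, buffer, lenp, ppos);
    }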
|