/linux-2.6.39/fs/partitions/

efi.c
    294  gpt_header **gpt, gpt_entry **ptes)                  in is_gpt_valid() argument
    299  if (!ptes)                                           in is_gpt_valid()
    357  if (!(*ptes = alloc_read_gpt_entries(state, *gpt)))  in is_gpt_valid()
    361  crc = efi_crc32((const unsigned char *) (*ptes),     in is_gpt_valid()
    374  kfree(*ptes);                                        in is_gpt_valid()
    375  *ptes = NULL;                                        in is_gpt_valid()
    516  gpt_entry **ptes)                                    in find_valid_gpt() argument
    524  if (!ptes)                                           in find_valid_gpt()
    559  *ptes = pptes;                                       in find_valid_gpt()
    571  *ptes = aptes;                                       in find_valid_gpt()
    [all …]

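Note: these matches are the GPT partition-entry check. is_gpt_valid() reads
the partition entry array (line 357), recomputes its CRC32 (line 361) to
compare against the value recorded in the GPT header, and frees the entries
on mismatch (lines 374-375). A minimal stand-alone sketch of that check;
gpt_entries_valid() and crc32_le() are illustrative names, not the kernel's:

    #include <stdint.h>
    #include <stddef.h>

    /* Plain bitwise CRC-32 over the polynomial EFI uses; the kernel
     * computes the same value with a table-driven crc32() behind its
     * efi_crc32() wrapper. */
    static uint32_t crc32_le(const uint8_t *buf, size_t len)
    {
        uint32_t crc = 0xFFFFFFFF;

        while (len--) {
            crc ^= *buf++;
            for (int bit = 0; bit < 8; bit++)
                crc = (crc >> 1) ^ (0xEDB88320 & -(crc & 1));
        }
        return crc ^ 0xFFFFFFFF;
    }

    /* Sketch of the entries check: the partition entry array is intact
     * only if its CRC matches the partition_entry_array_crc32 field
     * carried in the GPT header. */
    static int gpt_entries_valid(const uint8_t *entries, size_t size,
                                 uint32_t header_crc)
    {
        return crc32_le(entries, size) == header_crc;
    }
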
/linux-2.6.39/arch/alpha/kernel/

pci_iommu.c
     83  arena->ptes = __alloc_bootmem_node(NODE_DATA(nid), mem_size, align, 0);  in iommu_arena_new_node()
     84  if (!NODE_DATA(nid) || !arena->ptes) {             in iommu_arena_new_node()
     88  arena->ptes = __alloc_bootmem(mem_size, align, 0); in iommu_arena_new_node()
     94  arena->ptes = __alloc_bootmem(mem_size, align, 0); in iommu_arena_new_node()
    123  unsigned long *ptes;                               in iommu_arena_find_pages() local
    138  ptes = arena->ptes;                                in iommu_arena_find_pages()
    150  if (ptes[p+i])                                     in iommu_arena_find_pages()
    182  unsigned long *ptes;                               in iommu_arena_alloc() local
    188  ptes = arena->ptes;                                in iommu_arena_alloc()
    201  ptes[p+i] = IOMMU_INVALID_PTE;                     in iommu_arena_alloc()
    [all …]

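Note: arena->ptes is the Alpha IOMMU's scatter-gather page table, one
unsigned long per IOMMU page with zero meaning free, which is why
iommu_arena_find_pages() can test ptes[p+i] directly (line 150). The core of
that search is a linear scan for n consecutive free slots. A simplified
stand-alone sketch; the kernel version also wraps around and retries after
flushing the TLB, and arena_find_pages() is an illustrative name:

    /* Find n consecutive free (zero) slots in the pte array, keeping
     * candidate start indices aligned to (mask + 1). Returns the first
     * index of such a run, or -1 if none fits. */
    static long arena_find_pages(const unsigned long *ptes, long nent,
                                 long n, unsigned long mask)
    {
        long p = 0;                 /* assume the arena base is aligned */

        while (p + n <= nent) {
            long i;

            for (i = 0; i < n && !ptes[p + i]; i++)
                ;
            if (i == n)
                return p;           /* found a free run of length n */

            /* slot p+i is in use: restart just past it, realigned */
            p = (p + i + 1 + mask) & ~mask;
        }
        return -1;
    }
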
core_titan.c
    326  port->tba[0].csr = virt_to_phys(hose->sg_isa->ptes);  in titan_init_one_pachip_port()
    334  port->tba[2].csr = virt_to_phys(hose->sg_pci->ptes);  in titan_init_one_pachip_port()
    461  unsigned long *ptes;                                  in titan_ioremap() local
    514  ptes = hose->sg_pci->ptes;                            in titan_ioremap()
    518  pfn = ptes[baddr >> PAGE_SHIFT];                      in titan_ioremap()
    707  pte = aper->arena->ptes[baddr >> PAGE_SHIFT];         in titan_agp_translate()

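Note: lines 514-518 show how titan_ioremap() translates a bus address that
falls inside a scatter-gather window: the window's pte array is indexed by
page number and each sg pte carries the target pfn. The same pattern appears
in marvel_ioremap() and the AGP translate hooks below. A sketch, assuming
the Alpha convention that bit 0 of an sg pte is the valid bit with the pfn
above it; sg_bus_to_phys() is an illustrative name, and baddr is the offset
into the window:

    #define PAGE_SHIFT 13                  /* 8 KB pages on Alpha */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Translate a scatter-gather window offset to a physical address,
     * or return 0 if the pte is not valid. */
    static unsigned long sg_bus_to_phys(const unsigned long *ptes,
                                        unsigned long baddr)
    {
        unsigned long pte = ptes[baddr >> PAGE_SHIFT];

        if (!(pte & 1))                    /* valid bit clear: unmapped */
            return 0;

        return ((pte >> 1) << PAGE_SHIFT) | (baddr & (PAGE_SIZE - 1));
    }
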
core_marvel.c
     291  csrs->POx_TBASE[0].csr = virt_to_phys(hose->sg_isa->ptes);  in io7_init_hose()
     309  csrs->POx_TBASE[2].csr = virt_to_phys(hose->sg_pci->ptes);  in io7_init_hose()
     685  unsigned long *ptes;                                        in marvel_ioremap() local
     740  ptes = hose->sg_pci->ptes;                                  in marvel_ioremap()
     744  pfn = ptes[baddr >> PAGE_SHIFT];                            in marvel_ioremap()
    1043  pte = aper->arena->ptes[baddr >> PAGE_SHIFT];               in marvel_agp_translate()

pci_impl.h
    138  unsigned long *ptes;  member

core_cia.c
    459  arena->ptes[4] = pte0;                                              in verify_tb_operation()
    483  arena->ptes[5] = pte0;                                              in verify_tb_operation()
    519  arena->ptes[4] = 0;                                                 in verify_tb_operation()
    520  arena->ptes[5] = 0;                                                 in verify_tb_operation()
    732  *(vip)CIA_IOC_PCI_T0_BASE = virt_to_phys(hose->sg_isa->ptes) >> 2;  in do_init_arch()

core_tsunami.c
    333  pchip->tba[0].csr = virt_to_phys(hose->sg_isa->ptes);  in tsunami_init_one_pchip()
    337  pchip->tba[1].csr = virt_to_phys(hose->sg_pci->ptes);  in tsunami_init_one_pchip()

core_mcpcia.c
    375  *(vuip)MCPCIA_T0_BASE(mid) = virt_to_phys(hose->sg_isa->ptes) >> 8;  in mcpcia_startup_hose()
    379  *(vuip)MCPCIA_T1_BASE(mid) = virt_to_phys(hose->sg_pci->ptes) >> 8;  in mcpcia_startup_hose()

core_apecs.c
    358  *(vuip)APECS_IOC_TB2R = virt_to_phys(hose->sg_isa->ptes) >> 1;  in apecs_init_arch()

core_wildfire.c
    120  pci->pci_window[0].tbase.csr = virt_to_phys(hose->sg_isa->ptes);  in wildfire_init_hose()
    132  pci->pci_window[3].tbase.csr = virt_to_phys(hose->sg_pci->ptes);  in wildfire_init_hose()

core_lca.c
    284  *(vulp)LCA_IOC_T_BASE0 = virt_to_phys(hose->sg_isa->ptes);  in lca_init_arch()

core_t2.c
    359  *(vulp)T2_TBASE2 = virt_to_phys(hose->sg_isa->ptes) >> 1;  in t2_sg_map_window2()

/linux-2.6.39/arch/x86/mm/

init.c
     36  unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;  in find_early_table_space() local
     59  ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;        in find_early_table_space()
     61  ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;          in find_early_table_space()
     63  tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);  in find_early_table_space()

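Note: find_early_table_space() sizes the bootstrap page tables before any
can be allocated: one pte per 4 KB page being mapped (lines 59/61), with the
pte count then rounded up to whole pages of pte_t (line 63). The arithmetic
as a stand-alone illustration, using 64-bit x86 constants; a sketch, not the
kernel code:

    #include <stdio.h>

    #define PAGE_SHIFT   12
    #define PAGE_SIZE    (1UL << PAGE_SHIFT)
    #define PTRS_PER_PTE 512      /* 512 eight-byte ptes per 4 KB page */

    int main(void)
    {
        unsigned long end = 1UL << 30;                     /* map 1 GB */
        unsigned long ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
        /* one page of ptes maps PTRS_PER_PTE pages of memory */
        unsigned long table_pages = (ptes + PTRS_PER_PTE - 1) / PTRS_PER_PTE;

        /* prints: 262144 ptes -> 512 pte pages (2048 KB) */
        printf("%lu ptes -> %lu pte pages (%lu KB)\n",
               ptes, table_pages, table_pages * PAGE_SIZE / 1024);
        return 0;
    }
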
/linux-2.6.39/arch/powerpc/platforms/pseries/

lpar.c
    394  } ptes[4];                                           in pSeries_lpar_hptab_clear() local
    403  lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);  in pSeries_lpar_hptab_clear()
    407  if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==             in pSeries_lpar_hptab_clear()
    410  if (ptes[j].pteh & HPTE_V_VALID)                     in pSeries_lpar_hptab_clear()
    412  &(ptes[j].pteh), &(ptes[j].ptel));                   in pSeries_lpar_hptab_clear()

plpar_wrappers.h
    199  unsigned long *ptes)                             in plpar_pte_read_4_raw() argument
    207  memcpy(ptes, retbuf, 8*sizeof(unsigned long));   in plpar_pte_read_4_raw()

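Note: the two files belong together. plpar_pte_read_4_raw() returns four
hash page table entries per hypervisor call (the memcpy of eight unsigned
longs on line 207 is four pteh/ptel pairs), and pSeries_lpar_hptab_clear()
walks the table in strides of four, invalidating every valid entry. A
simplified sketch of that loop; the kernel additionally skips VRMA mappings
(elided here), HPTE_V_VALID's position is illustrative, and
read4()/invalidate() stand in for the real hypervisor calls:

    /* One hashed page table entry as read back from the hypervisor. */
    struct hpte {
        unsigned long pteh;       /* high word: valid bit, AVPN, ... */
        unsigned long ptel;       /* low word: real page number, ... */
    };

    #define HPTE_V_VALID 0x1UL    /* illustrative bit position */

    static void hptab_clear(unsigned long hpte_count,
                            void (*read4)(unsigned long slot, struct hpte *out),
                            void (*invalidate)(unsigned long slot))
    {
        struct hpte ptes[4];
        unsigned long i, j;

        for (i = 0; i < hpte_count; i += 4) {
            read4(i, ptes);       /* one call fills all four entries */
            for (j = 0; j < 4; j++)
                if (ptes[j].pteh & HPTE_V_VALID)
                    invalidate(i + j);
        }
    }
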
/linux-2.6.39/arch/x86/kvm/

paging_tmpl.h
     67  pt_element_t ptes[PT_MAX_FULL_LEVELS];                member
    206  walker->ptes[walker->level - 1] = pte;                in FNAME()
    257  walker->ptes[walker->level - 1] = pte;                in FNAME()
    377  return r || curr_pte != gw->ptes[level - 1];          in FNAME()
    444  bool dirty = is_dirty_gpte(gw->ptes[gw->level - 1]);  in FNAME()
    449  if (!is_present_gpte(gw->ptes[gw->level - 1]))        in FNAME()

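Note: the ptes[] member caches the guest pte seen at every level of a page
table walk (lines 206/257), and line 377 is the later consistency check: the
gpte is re-read and compared with the cached value, so a concurrent guest
update invalidates the walk. A cut-down sketch of the structure and the
check; walker_note_gpte() and gpte_changed() are illustrative names:

    #include <stdint.h>

    typedef uint64_t pt_element_t;
    #define PT_MAX_FULL_LEVELS 4

    struct guest_walker {
        int level;
        pt_element_t ptes[PT_MAX_FULL_LEVELS];  /* gpte per walk level */
    };

    /* Record the gpte seen while walking down one level. */
    static void walker_note_gpte(struct guest_walker *w, pt_element_t pte)
    {
        w->ptes[w->level - 1] = pte;
    }

    /* Later recheck: has the guest changed this pte since the walk? */
    static int gpte_changed(const struct guest_walker *gw, int level,
                            pt_element_t curr_pte)
    {
        return curr_pte != gw->ptes[level - 1];
    }
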
/linux-2.6.39/Documentation/kvm/

mmu.txt
    115  Leaf ptes point at guest pages.
    117  The following table shows translations encoded by leaf ptes, with higher-level
    146  sptes. That means a guest page table contains more ptes than the host,
    206  changed but before the tlb entry is flushed. Accordingly, unsync ptes
    220  The mmu maintains a reverse mapping whereby all ptes mapping a page can be

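Note: line 220 is the key design point in mmu.txt: given a guest page frame,
the mmu can reach every shadow pte that maps it, which is what makes
swapping, write protection, and page aging possible. A toy model of such a
reverse map; the real mmu chains descriptors rather than using a fixed
array, and all names here are illustrative:

    #include <stdint.h>

    typedef uint64_t spte_t;

    /* For one guest frame: the shadow ptes that currently map it. */
    struct rmap {
        spte_t *sptes[8];     /* fixed size for the sketch only */
        int n;
    };

    /* Drop every mapping of the frame, e.g. before swapping it out. */
    static void rmap_zap_all(struct rmap *rm)
    {
        for (int i = 0; i < rm->n; i++)
            *rm->sptes[i] = 0;       /* non-present: unmapped */
        rm->n = 0;
    }
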
/linux-2.6.39/drivers/staging/gma500/

psb_mmu.c
    314  uint32_t *ptes;          in psb_mmu_alloc_pt() local
    330  ptes = (uint32_t *) v;   in psb_mmu_alloc_pt()
    332  *ptes++ = pd->invalid_pte;  in psb_mmu_alloc_pt()

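Note: lines 330-332 are psb_mmu_alloc_pt() initialising a freshly allocated
page table by storing the directory's invalid_pte value into every slot, so
that unmapped entries fault predictably instead of pointing at stale memory.
A user-space sketch of the same fill; alloc_pt() and PTES_PER_PAGE are
illustrative:

    #include <stdint.h>
    #include <stdlib.h>

    #define PTES_PER_PAGE 1024   /* 4 KB page / 4-byte pte, as in gma500 */

    static uint32_t *alloc_pt(uint32_t invalid_pte)
    {
        uint32_t *pt = malloc(PTES_PER_PAGE * sizeof(*pt));
        uint32_t *ptes = pt;

        if (!pt)
            return NULL;
        for (int i = 0; i < PTES_PER_PAGE; i++)
            *ptes++ = invalid_pte;   /* every entry starts invalid */
        return pt;
    }
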
/linux-2.6.39/Documentation/cgroups/

memcg_test.txt
     99  and pages for ptes are freed one by one.(see mm/memory.c). Uncharges
    155  swp_entry's refcnt += # of ptes.

/linux-2.6.39/Documentation/vm/

page_migration
    137  16. If migration entries were page then replace them with real ptes. Doing

unevictable-lru.txt
    372  allocate the huge pages and populate the ptes.
    378  make_pages_present() to populate the ptes.