
Searched refs:ptes (Results 1–21 of 21) sorted by relevance

/linux-2.6.39/fs/partitions/
efi.c:294  gpt_header **gpt, gpt_entry **ptes) in is_gpt_valid() argument
299 if (!ptes) in is_gpt_valid()
357 if (!(*ptes = alloc_read_gpt_entries(state, *gpt))) in is_gpt_valid()
361 crc = efi_crc32((const unsigned char *) (*ptes), in is_gpt_valid()
374 kfree(*ptes); in is_gpt_valid()
375 *ptes = NULL; in is_gpt_valid()
516 gpt_entry **ptes) in find_valid_gpt() argument
524 if (!ptes) in find_valid_gpt()
559 *ptes = pptes; in find_valid_gpt()
571 *ptes = aptes; in find_valid_gpt()
[all …]
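
Note: the efi.c hits above trace a validate-and-discard pattern: read the GPT entry array, CRC it, and free it on mismatch (lines 357-375). A minimal userspace sketch of that check, with zlib's crc32() standing in for the kernel's efi_crc32() and the on-disk structures trimmed to just the fields the check touches (treat the layouts as assumptions):

    #include <stdint.h>
    #include <stdlib.h>
    #include <zlib.h>   /* crc32() stands in for the kernel's efi_crc32() */

    /* Trimmed stand-ins for the on-disk GPT structures. */
    struct gpt_header {
        uint32_t num_partition_entries;
        uint32_t sizeof_partition_entry;
        uint32_t partition_entry_array_crc32;
    };

    struct gpt_entry {
        uint8_t raw[128];   /* real entries carry GUIDs, LBAs, a name */
    };

    /*
     * Mirrors the efi.c flow: on CRC mismatch the freshly read entry
     * array is freed and the caller's pointer cleared, like the
     * kfree(*ptes); *ptes = NULL; pair at lines 374-375.
     */
    static int validate_gpt_entries(const struct gpt_header *gpt,
                                    struct gpt_entry **ptes)
    {
        uint32_t crc;
        size_t len;

        if (!gpt || !ptes || !*ptes)
            return 0;

        len = (size_t)gpt->num_partition_entries *
              gpt->sizeof_partition_entry;
        crc = crc32(0L, (const unsigned char *)*ptes, len);

        if (crc != gpt->partition_entry_array_crc32) {
            free(*ptes);
            *ptes = NULL;   /* never hand back an unvalidated array */
            return 0;
        }
        return 1;
    }
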
/linux-2.6.39/arch/alpha/kernel/
pci_iommu.c:83  arena->ptes = __alloc_bootmem_node(NODE_DATA(nid), mem_size, align, 0); in iommu_arena_new_node()
84 if (!NODE_DATA(nid) || !arena->ptes) { in iommu_arena_new_node()
88 arena->ptes = __alloc_bootmem(mem_size, align, 0); in iommu_arena_new_node()
94 arena->ptes = __alloc_bootmem(mem_size, align, 0); in iommu_arena_new_node()
123 unsigned long *ptes; in iommu_arena_find_pages() local
138 ptes = arena->ptes; in iommu_arena_find_pages()
150 if (ptes[p+i]) in iommu_arena_find_pages()
182 unsigned long *ptes; in iommu_arena_alloc() local
188 ptes = arena->ptes; in iommu_arena_alloc()
201 ptes[p+i] = IOMMU_INVALID_PTE; in iommu_arena_alloc()
[all …]
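
Note: the iommu_arena_find_pages() hit at line 150 is a linear scan for n consecutive free (zero) pte slots. The same scan, freestanding (the arena is reduced to a bare array; names are illustrative):

    #include <stddef.h>

    /*
     * Find n consecutive free (zero) slots in a pte array of
     * arena_size entries, as iommu_arena_find_pages() does.
     * Returns the starting index, or -1 if no run fits.
     */
    static long find_free_run(const unsigned long *ptes,
                              size_t arena_size, size_t n)
    {
        size_t p = 0, i;

        while (p + n <= arena_size) {
            for (i = 0; i < n; i++) {
                if (ptes[p + i]) {
                    p += i + 1;   /* occupied: restart past it */
                    break;
                }
            }
            if (i == n)
                return (long)p;   /* run of n free slots found */
        }
        return -1;
    }
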
core_titan.c:326  port->tba[0].csr = virt_to_phys(hose->sg_isa->ptes); in titan_init_one_pachip_port()
334 port->tba[2].csr = virt_to_phys(hose->sg_pci->ptes); in titan_init_one_pachip_port()
461 unsigned long *ptes; in titan_ioremap() local
514 ptes = hose->sg_pci->ptes; in titan_ioremap()
518 pfn = ptes[baddr >> PAGE_SHIFT]; in titan_ioremap()
707 pte = aper->arena->ptes[baddr >> PAGE_SHIFT]; in titan_agp_translate()
core_marvel.c:291  csrs->POx_TBASE[0].csr = virt_to_phys(hose->sg_isa->ptes); in io7_init_hose()
309 csrs->POx_TBASE[2].csr = virt_to_phys(hose->sg_pci->ptes); in io7_init_hose()
685 unsigned long *ptes; in marvel_ioremap() local
740 ptes = hose->sg_pci->ptes; in marvel_ioremap()
744 pfn = ptes[baddr >> PAGE_SHIFT]; in marvel_ioremap()
1043 pte = aper->arena->ptes[baddr >> PAGE_SHIFT]; in marvel_agp_translate()
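
Note: titan_ioremap(), marvel_ioremap(), and the two *_agp_translate() hits all do the same translation: index the scatter-gather pte array by page number and recover the pfn from the pte. A sketch of that lookup; the "pfn << 1 | valid" pte encoding is an assumption about the Alpha SG window format:

    #define PAGE_SHIFT 13   /* Alpha uses 8 KB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /*
     * Translate a bus address through a scatter-gather pte array,
     * as in pfn = ptes[baddr >> PAGE_SHIFT] above.  Returns the
     * physical address, or 0 for an invalid pte.
     */
    static unsigned long sg_translate(const unsigned long *ptes,
                                      unsigned long baddr)
    {
        unsigned long pte = ptes[baddr >> PAGE_SHIFT];

        if (!(pte & 1))
            return 0;   /* valid bit clear: no backing page */

        /* pfn plus the offset within the page */
        return ((pte >> 1) << PAGE_SHIFT) | (baddr & (PAGE_SIZE - 1));
    }
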
pci_impl.h:138  unsigned long *ptes; member
core_cia.c:459  arena->ptes[4] = pte0; in verify_tb_operation()
483 arena->ptes[5] = pte0; in verify_tb_operation()
519 arena->ptes[4] = 0; in verify_tb_operation()
520 arena->ptes[5] = 0; in verify_tb_operation()
732 *(vip)CIA_IOC_PCI_T0_BASE = virt_to_phys(hose->sg_isa->ptes) >> 2; in do_init_arch()
core_tsunami.c:333  pchip->tba[0].csr = virt_to_phys(hose->sg_isa->ptes); in tsunami_init_one_pchip()
337 pchip->tba[1].csr = virt_to_phys(hose->sg_pci->ptes); in tsunami_init_one_pchip()
core_mcpcia.c:375  *(vuip)MCPCIA_T0_BASE(mid) = virt_to_phys(hose->sg_isa->ptes) >> 8; in mcpcia_startup_hose()
379 *(vuip)MCPCIA_T1_BASE(mid) = virt_to_phys(hose->sg_pci->ptes) >> 8; in mcpcia_startup_hose()
core_apecs.c:358  *(vuip)APECS_IOC_TB2R = virt_to_phys(hose->sg_isa->ptes) >> 1; in apecs_init_arch()
core_wildfire.c:120  pci->pci_window[0].tbase.csr = virt_to_phys(hose->sg_isa->ptes); in wildfire_init_hose()
132 pci->pci_window[3].tbase.csr = virt_to_phys(hose->sg_pci->ptes); in wildfire_init_hose()
core_lca.c:284  *(vulp)LCA_IOC_T_BASE0 = virt_to_phys(hose->sg_isa->ptes); in lca_init_arch()
core_t2.c:359  *(vulp)T2_TBASE2 = virt_to_phys(hose->sg_isa->ptes) >> 1; in t2_sg_map_window2()
/linux-2.6.39/arch/x86/mm/
init.c:36  unsigned long puds, pmds, ptes, tables, start = 0, good_end = end; in find_early_table_space() local
59 ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; in find_early_table_space()
61 ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT; in find_early_table_space()
63 tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE); in find_early_table_space()
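
Note: find_early_table_space() is pure arithmetic: one pte per page to be mapped, rounded up to whole page-table pages. The same computation in isolation, assuming x86-64's 4 KB pages and 8-byte ptes:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    /* round up to a whole number of pages, like the kernel's roundup() */
    #define ROUNDUP(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long end = 1UL << 30;   /* map the first 1 GB */

        /* one pte per page to be mapped, as at init.c:61 */
        unsigned long ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;

        /* space for the pte tables, page-rounded, as at init.c:63 */
        unsigned long tables = ROUNDUP(ptes * sizeof(unsigned long));

        printf("%lu ptes -> %lu bytes (%lu pages) of pte tables\n",
               ptes, tables, tables >> PAGE_SHIFT);
        return 0;
    }
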
/linux-2.6.39/arch/powerpc/platforms/pseries/
lpar.c:394  } ptes[4]; in pSeries_lpar_hptab_clear() local
403 lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes); in pSeries_lpar_hptab_clear()
407 if ((ptes[j].pteh & HPTE_V_VRMA_MASK) == in pSeries_lpar_hptab_clear()
410 if (ptes[j].pteh & HPTE_V_VALID) in pSeries_lpar_hptab_clear()
412 &(ptes[j].pteh), &(ptes[j].ptel)); in pSeries_lpar_hptab_clear()
plpar_wrappers.h:199  unsigned long *ptes) in plpar_pte_read_4_raw() argument
207 memcpy(ptes, retbuf, 8*sizeof(unsigned long)); in plpar_pte_read_4_raw()
/linux-2.6.39/arch/x86/kvm/
paging_tmpl.h:67  pt_element_t ptes[PT_MAX_FULL_LEVELS]; member
206 walker->ptes[walker->level - 1] = pte; in FNAME()
257 walker->ptes[walker->level - 1] = pte; in FNAME()
377 return r || curr_pte != gw->ptes[level - 1]; in FNAME()
444 bool dirty = is_dirty_gpte(gw->ptes[gw->level - 1]); in FNAME()
449 if (!is_present_gpte(gw->ptes[gw->level - 1])) in FNAME()
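
Note: the walker records the guest pte it read at each level (lines 206, 257) so that the shadow code can later detect a racing guest write by re-reading and comparing, which is the curr_pte != gw->ptes[level - 1] test at line 377. A stripped-down sketch; the struct mirrors guest_walker, but the read helper is invented for illustration:

    #include <stdbool.h>
    #include <stdint.h>

    #define PT_MAX_FULL_LEVELS 4

    typedef uint64_t pt_element_t;

    /* Cached guest ptes, one per level, as in the guest_walker member. */
    struct guest_walker {
        int level;
        pt_element_t ptes[PT_MAX_FULL_LEVELS];
    };

    /*
     * Hypothetical stand-in for re-reading a guest pte; the real code
     * copies it out of the guest's own page tables.
     */
    static int read_guest_pte(int level, pt_element_t *out)
    {
        (void)level;
        *out = 0;   /* ... fetch from guest memory here ... */
        return 0;   /* nonzero would mean the read failed */
    }

    /*
     * Mirrors the test at paging_tmpl.h:377: the cached walk is stale
     * if the read failed or the pte changed under us since the walk.
     */
    static bool walk_is_stale(const struct guest_walker *gw, int level)
    {
        pt_element_t curr_pte;
        int r = read_guest_pte(level, &curr_pte);

        return r || curr_pte != gw->ptes[level - 1];
    }
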
/linux-2.6.39/Documentation/kvm/
mmu.txt:115  Leaf ptes point at guest pages.
117 The following table shows translations encoded by leaf ptes, with higher-level
146 sptes. That means a guest page table contains more ptes than the host,
206 changed but before the tlb entry is flushed. Accordingly, unsync ptes
220 The mmu maintains a reverse mapping whereby all ptes mapping a page can be
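
Note: mmu.txt:220 describes the reverse map: from a guest page frame, find every pte that maps it. A toy version as a per-page singly linked list of pte pointers (all names invented; the kernel's rmap is more compact):

    #include <stdlib.h>

    typedef unsigned long pte_t;

    /* One node per pte that maps the page. */
    struct rmap_entry {
        pte_t *sptep;
        struct rmap_entry *next;
    };

    /* Head of the reverse-map chain, kept per guest page frame. */
    struct rmap_head {
        struct rmap_entry *first;
    };

    static int rmap_add(struct rmap_head *head, pte_t *sptep)
    {
        struct rmap_entry *e = malloc(sizeof(*e));
        if (!e)
            return -1;
        e->sptep = sptep;
        e->next = head->first;
        head->first = e;
        return 0;
    }

    /* Visit every pte mapping the page, e.g. to write-protect it. */
    static void rmap_for_each(struct rmap_head *head, void (*fn)(pte_t *))
    {
        struct rmap_entry *e;
        for (e = head->first; e; e = e->next)
            fn(e->sptep);
    }
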
/linux-2.6.39/drivers/staging/gma500/
psb_mmu.c:314  uint32_t *ptes; in psb_mmu_alloc_pt() local
330 ptes = (uint32_t *) v; in psb_mmu_alloc_pt()
332 *ptes++ = pd->invalid_pte; in psb_mmu_alloc_pt()
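
Note: psb_mmu_alloc_pt() initializes a fresh page table by writing the directory's invalid-pte value into every slot (lines 330-332). The loop in isolation, assuming 1024 32-bit entries per 4 KB table:

    #include <stdint.h>

    #define PT_ENTRIES 1024   /* 4096-byte table / 4-byte pte */

    /* Fill a new page table so every slot faults until it is mapped. */
    static void fill_invalid(uint32_t *v, uint32_t invalid_pte)
    {
        uint32_t *ptes = v;
        int i;

        for (i = 0; i < PT_ENTRIES; i++)
            *ptes++ = invalid_pte;
    }
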
/linux-2.6.39/Documentation/cgroups/
memcg_test.txt:99  and pages for ptes are freed one by one (see mm/memory.c). Uncharges
155 swp_entry's refcnt += # of ptes.
/linux-2.6.39/Documentation/vm/
page_migration:137  16. If migration entries were inserted into the page table, then replace them with real ptes. Doing
unevictable-lru.txt:372  allocate the huge pages and populate the ptes.
378 make_pages_present() to populate the ptes.