Searched refs:read_pda (Results 1 – 9 of 9) sorted by relevance
29  if ((ret = read_pda(pmd_quick)) != NULL) {  in get_pmd_fast()
40  *(unsigned long *)pmd = (unsigned long) read_pda(pmd_quick);  in pmd_free()
54  unsigned long *ret = (unsigned long *)read_pda(pmd_quick);  in pmd_alloc_one_fast()
71  unsigned long *ret = read_pda(pgd_quick);  in pgd_alloc_one_fast()
93  *(unsigned long *)pgd = (unsigned long) read_pda(pgd_quick);  in pgd_free()
116  if ((ret = read_pda(pte_quick)) != NULL) {  in pte_alloc_one_fast()
129  *(unsigned long *)pte = (unsigned long) read_pda(pte_quick);  in pte_free()
53  *read_pda(level4_pgt) = __pa(next->pgd) | _PAGE_TABLE;  in switch_mm()
66  *read_pda(level4_pgt) = __pa(next->pgd) | _PAGE_TABLE;  in switch_mm()
11 struct task_struct *t = read_pda(pcurrent); in get_current()
84 #define smp_processor_id() read_pda(cpunumber)
63 #define read_pda(field) pda_from_op("mov",field) macro
385  __pgd_offset_k((pgd_t *)read_pda(level4_pgt), address)
405  #define pml4_offset_k(address) ((pml4_t *)read_pda(level4_pgt) + pml4_index(address))
47  if(read_pda(pgtable_cache_sz) > high) {  in do_check_pgt_cache()
49  if (read_pda(pgd_quick)) {  in do_check_pgt_cache()
53  if (read_pda(pmd_quick)) {  in do_check_pgt_cache()
57  if (read_pda(pte_quick)) {  in do_check_pgt_cache()
61  } while(read_pda(pgtable_cache_sz) > low);  in do_check_pgt_cache()
95  printk("%ld pages in page table cache\n",read_pda(pgtable_cache_sz));  in show_mem()
231 *read_pda(level4_pgt) = __pa(init_mm.pgd) | _PAGE_TABLE; in leave_mm()
628 prev->userrsp = read_pda(oldrsp); in __switch_to()
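
Taken together, the hits above use read_pda for per-CPU state: the current task (pcurrent), the CPU number (cpunumber), the saved user stack pointer (oldrsp), the per-CPU top-level page table pointer (level4_pgt), and the page-table quicklist counters. Below is a minimal sketch of the access pattern, assuming a simplified pda layout and a GCC-style inline-asm macro; the field names are borrowed from the hits, everything else is illustrative and not the kernel's actual struct x8664_pda or pda_from_op implementation.

#include <stddef.h>

struct pda {
	void          *pcurrent;   /* task currently running on this CPU */
	unsigned long  oldrsp;     /* user stack pointer saved on kernel entry */
	int            cpunumber;  /* this CPU's index */
	unsigned long *level4_pgt; /* per-CPU pointer into the top-level page table */
};

/* read_pda(field): a single %gs-relative load; each CPU's %gs base points
 * at its own struct pda, so no explicit per-CPU indexing is needed. */
#define read_pda(field)                                            \
	({                                                         \
		typeof(((struct pda *)0)->field) ret__;            \
		asm volatile("mov %%gs:%c1, %0"                    \
			     : "=r"(ret__)                         \
			     : "i"(offsetof(struct pda, field)));  \
		ret__;                                             \
	})

/* e.g. the smp_processor_id() hit above reduces to one such load: */
#define smp_processor_id() read_pda(cpunumber)

This only compiles and behaves sensibly inside a kernel that has set each CPU's %gs base to its own pda; in the real code the definition at line 63 above expands through pda_from_op, which additionally selects the correct mov width per field size.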