1 /* $NetBSD: pmap.h,v 1.80 2018/06/20 11:49:38 maxv Exp $ */ 2 3 /* 4 * Copyright (c) 1997 Charles D. Cranor and Washington University. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28 /* 29 * Copyright (c) 2001 Wasabi Systems, Inc. 30 * All rights reserved. 31 * 32 * Written by Frank van der Linden for Wasabi Systems, Inc. 33 * 34 * Redistribution and use in source and binary forms, with or without 35 * modification, are permitted provided that the following conditions 36 * are met: 37 * 1. Redistributions of source code must retain the above copyright 38 * notice, this list of conditions and the following disclaimer. 39 * 2. Redistributions in binary form must reproduce the above copyright 40 * notice, this list of conditions and the following disclaimer in the 41 * documentation and/or other materials provided with the distribution. 42 * 3. All advertising materials mentioning features or use of this software 43 * must display the following acknowledgement: 44 * This product includes software developed for the NetBSD Project by 45 * Wasabi Systems, Inc. 46 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 47 * or promote products derived from this software without specific prior 48 * written permission. 49 * 50 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 51 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 52 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 53 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 54 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 55 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 56 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 57 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 58 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 59 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 60 * POSSIBILITY OF SUCH DAMAGE. 61 */ 62 63 /* 64 * pmap.h: see pmap.c for the history of this pmap module. 65 */ 66 67 #ifndef _X86_PMAP_H_ 68 #define _X86_PMAP_H_ 69 70 /* 71 * pl*_pi: index in the ptp page for a pde mapping a VA. 72 * (pl*_i below is the index in the virtual array of all pdes per level) 73 */ 74 #define pl1_pi(VA) (((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT) 75 #define pl2_pi(VA) (((VA_SIGN_POS(VA)) & L2_MASK) >> L2_SHIFT) 76 #define pl3_pi(VA) (((VA_SIGN_POS(VA)) & L3_MASK) >> L3_SHIFT) 77 #define pl4_pi(VA) (((VA_SIGN_POS(VA)) & L4_MASK) >> L4_SHIFT) 78 79 /* 80 * pl*_i: generate index into pde/pte arrays in virtual space 81 * 82 * pl_i(va, X) == plX_i(va) <= pl_i_roundup(va, X) 83 */ 84 #define pl1_i(VA) (((VA_SIGN_POS(VA)) & L1_FRAME) >> L1_SHIFT) 85 #define pl2_i(VA) (((VA_SIGN_POS(VA)) & L2_FRAME) >> L2_SHIFT) 86 #define pl3_i(VA) (((VA_SIGN_POS(VA)) & L3_FRAME) >> L3_SHIFT) 87 #define pl4_i(VA) (((VA_SIGN_POS(VA)) & L4_FRAME) >> L4_SHIFT) 88 #define pl_i(va, lvl) \ 89 (((VA_SIGN_POS(va)) & ptp_masks[(lvl)-1]) >> ptp_shifts[(lvl)-1]) 90 91 #define pl_i_roundup(va, lvl) pl_i((va)+ ~ptp_masks[(lvl)-1], (lvl)) 92 93 /* 94 * PTP macros: 95 * a PTP's index is the PD index of the PDE that points to it 96 * a PTP's offset is the byte-offset in the PTE space that this PTP is at 97 * a PTP's VA is the first VA mapped by that PTP 98 */ 99 100 #define ptp_va2o(va, lvl) (pl_i(va, (lvl)+1) * PAGE_SIZE) 101 102 /* size of a PDP: usually one page, except for PAE */ 103 #ifdef PAE 104 #define PDP_SIZE 4 105 #else 106 #define PDP_SIZE 1 107 #endif 108 109 110 #if defined(_KERNEL) 111 #include <sys/kcpuset.h> 112 #include <uvm/pmap/pmap_pvt.h> 113 114 #define BTSEG_NONE 0 115 #define BTSEG_TEXT 1 116 #define BTSEG_RODATA 2 117 #define BTSEG_DATA 3 118 #define BTSPACE_NSEGS 64 119 120 struct bootspace { 121 struct { 122 vaddr_t va; 123 paddr_t pa; 124 size_t sz; 125 } head; 126 127 /* Kernel segments. */ 128 struct { 129 int type; 130 vaddr_t va; 131 paddr_t pa; 132 size_t sz; 133 } segs[BTSPACE_NSEGS]; 134 135 /* 136 * The area used by the early kernel bootstrap. It contains the kernel 137 * symbols, the preloaded modules, the bootstrap tables, and the ISA I/O 138 * mem. 139 */ 140 struct { 141 vaddr_t va; 142 paddr_t pa; 143 size_t sz; 144 } boot; 145 146 /* A magic VA usable by the bootstrap code. */ 147 vaddr_t spareva; 148 149 /* Virtual address of the page directory. */ 150 vaddr_t pdir; 151 152 /* Area dedicated to kernel modules (amd64 only). */ 153 vaddr_t smodule; 154 vaddr_t emodule; 155 }; 156 157 #ifndef MAXGDTSIZ 158 #define MAXGDTSIZ 65536 /* XXX */ 159 #endif 160 161 struct pcpu_entry { 162 uint8_t gdt[MAXGDTSIZ]; 163 uint8_t tss[PAGE_SIZE]; 164 uint8_t ist0[PAGE_SIZE]; 165 uint8_t ist1[PAGE_SIZE]; 166 uint8_t ist2[PAGE_SIZE]; 167 uint8_t ist3[PAGE_SIZE]; 168 uint8_t rsp0[2 * PAGE_SIZE]; 169 } __packed; 170 171 struct pcpu_area { 172 #ifdef SVS 173 uint8_t utls[PAGE_SIZE]; 174 #endif 175 uint8_t idt[PAGE_SIZE]; 176 uint8_t ldt[PAGE_SIZE]; 177 struct pcpu_entry ent[MAXCPUS]; 178 } __packed; 179 180 extern struct pcpu_area *pcpuarea; 181 182 /* 183 * pmap data structures: see pmap.c for details of locking. 184 */ 185 186 /* 187 * we maintain a list of all non-kernel pmaps 188 */ 189 190 LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */ 191 192 /* 193 * linked list of all non-kernel pmaps 194 */ 195 extern struct pmap_head pmaps; 196 extern kmutex_t pmaps_lock; /* protects pmaps */ 197 198 /* 199 * pool_cache(9) that PDPs are allocated from 200 */ 201 extern struct pool_cache pmap_pdp_cache; 202 203 /* 204 * the pmap structure 205 * 206 * note that the pm_obj contains the lock pointer, the reference count, 207 * page list, and number of PTPs within the pmap. 208 * 209 * pm_lock is the same as the lock for vm object 0. Changes to 210 * the other objects may only be made if that lock has been taken 211 * (the other object locks are only used when uvm_pagealloc is called) 212 */ 213 214 struct pmap { 215 struct uvm_object pm_obj[PTP_LEVELS-1]; /* objects for lvl >= 1) */ 216 #define pm_lock pm_obj[0].vmobjlock 217 kmutex_t pm_obj_lock[PTP_LEVELS-1]; /* locks for pm_objs */ 218 LIST_ENTRY(pmap) pm_list; /* list (lck by pm_list lock) */ 219 pd_entry_t *pm_pdir; /* VA of PD (lck by object lock) */ 220 paddr_t pm_pdirpa[PDP_SIZE]; /* PA of PDs (read-only after create) */ 221 struct vm_page *pm_ptphint[PTP_LEVELS-1]; 222 /* pointer to a PTP in our pmap */ 223 struct pmap_statistics pm_stats; /* pmap stats (lck by object lock) */ 224 225 #if !defined(__x86_64__) 226 vaddr_t pm_hiexec; /* highest executable mapping */ 227 #endif /* !defined(__x86_64__) */ 228 int pm_flags; /* see below */ 229 230 union descriptor *pm_ldt; /* user-set LDT */ 231 size_t pm_ldt_len; /* size of LDT in bytes */ 232 int pm_ldt_sel; /* LDT selector */ 233 kcpuset_t *pm_cpus; /* mask of CPUs using pmap */ 234 kcpuset_t *pm_kernel_cpus; /* mask of CPUs using kernel part 235 of pmap */ 236 kcpuset_t *pm_xen_ptp_cpus; /* mask of CPUs which have this pmap's 237 ptp mapped */ 238 uint64_t pm_ncsw; /* for assertions */ 239 struct vm_page *pm_gc_ptp; /* pages from pmap g/c */ 240 }; 241 242 /* macro to access pm_pdirpa slots */ 243 #ifdef PAE 244 #define pmap_pdirpa(pmap, index) \ 245 ((pmap)->pm_pdirpa[l2tol3(index)] + l2tol2(index) * sizeof(pd_entry_t)) 246 #else 247 #define pmap_pdirpa(pmap, index) \ 248 ((pmap)->pm_pdirpa[0] + (index) * sizeof(pd_entry_t)) 249 #endif 250 251 /* 252 * MD flags that we use for pmap_enter and pmap_kenter_pa: 253 */ 254 255 /* 256 * global kernel variables 257 */ 258 259 /* 260 * PDPpaddr is the physical address of the kernel's PDP. 261 * - i386 non-PAE and amd64: PDPpaddr corresponds directly to the %cr3 262 * value associated to the kernel process, proc0. 263 * - i386 PAE: it still represents the PA of the kernel's PDP (L2). Due to 264 * the L3 PD, it cannot be considered as the equivalent of a %cr3 any more. 265 * - Xen: it corresponds to the PFN of the kernel's PDP. 266 */ 267 extern u_long PDPpaddr; 268 269 extern pd_entry_t pmap_pg_g; /* do we support PG_G? */ 270 extern pd_entry_t pmap_pg_nx; /* do we support PG_NX? */ 271 extern int pmap_largepages; 272 extern long nkptp[PTP_LEVELS]; 273 274 /* 275 * macros 276 */ 277 278 #define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count) 279 #define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count) 280 281 #define pmap_clear_modify(pg) pmap_clear_attrs(pg, PG_M) 282 #define pmap_clear_reference(pg) pmap_clear_attrs(pg, PG_U) 283 #define pmap_copy(DP,SP,D,L,S) __USE(L) 284 #define pmap_is_modified(pg) pmap_test_attrs(pg, PG_M) 285 #define pmap_is_referenced(pg) pmap_test_attrs(pg, PG_U) 286 #define pmap_move(DP,SP,D,L,S) 287 #define pmap_phys_address(ppn) (x86_ptob(ppn) & ~X86_MMAP_FLAG_MASK) 288 #define pmap_mmap_flags(ppn) x86_mmap_flags(ppn) 289 #define pmap_valid_entry(E) ((E) & PG_V) /* is PDE or PTE valid? */ 290 291 #if defined(__x86_64__) || defined(PAE) 292 #define X86_MMAP_FLAG_SHIFT (64 - PGSHIFT) 293 #else 294 #define X86_MMAP_FLAG_SHIFT (32 - PGSHIFT) 295 #endif 296 297 #define X86_MMAP_FLAG_MASK 0xf 298 #define X86_MMAP_FLAG_PREFETCH 0x1 299 300 /* 301 * prototypes 302 */ 303 304 void pmap_activate(struct lwp *); 305 void pmap_bootstrap(vaddr_t); 306 bool pmap_clear_attrs(struct vm_page *, unsigned); 307 bool pmap_pv_clear_attrs(paddr_t, unsigned); 308 void pmap_deactivate(struct lwp *); 309 void pmap_page_remove(struct vm_page *); 310 void pmap_pv_remove(paddr_t); 311 void pmap_remove(struct pmap *, vaddr_t, vaddr_t); 312 bool pmap_test_attrs(struct vm_page *, unsigned); 313 void pmap_write_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t); 314 void pmap_load(void); 315 paddr_t pmap_init_tmp_pgtbl(paddr_t); 316 void pmap_remove_all(struct pmap *); 317 void pmap_ldt_cleanup(struct lwp *); 318 void pmap_ldt_sync(struct pmap *); 319 void pmap_kremove_local(vaddr_t, vsize_t); 320 321 #define __HAVE_PMAP_PV_TRACK 1 322 void pmap_pv_init(void); 323 void pmap_pv_track(paddr_t, psize_t); 324 void pmap_pv_untrack(paddr_t, psize_t); 325 326 void pmap_map_ptes(struct pmap *, struct pmap **, pd_entry_t **, 327 pd_entry_t * const **); 328 void pmap_unmap_ptes(struct pmap *, struct pmap *); 329 330 int pmap_pdes_invalid(vaddr_t, pd_entry_t * const *, pd_entry_t *); 331 332 u_int x86_mmap_flags(paddr_t); 333 334 bool pmap_is_curpmap(struct pmap *); 335 336 #ifndef __HAVE_DIRECT_MAP 337 void pmap_vpage_cpu_init(struct cpu_info *); 338 #endif 339 340 vaddr_t reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */ 341 342 typedef enum tlbwhy { 343 TLBSHOOT_APTE, 344 TLBSHOOT_KENTER, 345 TLBSHOOT_KREMOVE, 346 TLBSHOOT_FREE_PTP1, 347 TLBSHOOT_FREE_PTP2, 348 TLBSHOOT_REMOVE_PTE, 349 TLBSHOOT_REMOVE_PTES, 350 TLBSHOOT_SYNC_PV1, 351 TLBSHOOT_SYNC_PV2, 352 TLBSHOOT_WRITE_PROTECT, 353 TLBSHOOT_ENTER, 354 TLBSHOOT_UPDATE, 355 TLBSHOOT_BUS_DMA, 356 TLBSHOOT_BUS_SPACE, 357 TLBSHOOT__MAX, 358 } tlbwhy_t; 359 360 void pmap_tlb_init(void); 361 void pmap_tlb_cpu_init(struct cpu_info *); 362 void pmap_tlb_shootdown(pmap_t, vaddr_t, pt_entry_t, tlbwhy_t); 363 void pmap_tlb_shootnow(void); 364 void pmap_tlb_intr(void); 365 366 #define PMAP_GROWKERNEL /* turn on pmap_growkernel interface */ 367 #define PMAP_FORK /* turn on pmap_fork interface */ 368 369 /* 370 * Do idle page zero'ing uncached to avoid polluting the cache. 371 */ 372 bool pmap_pageidlezero(paddr_t); 373 #define PMAP_PAGEIDLEZERO(pa) pmap_pageidlezero((pa)) 374 375 /* 376 * inline functions 377 */ 378 379 __inline static bool __unused 380 pmap_pdes_valid(vaddr_t va, pd_entry_t * const *pdes, pd_entry_t *lastpde) 381 { 382 return pmap_pdes_invalid(va, pdes, lastpde) == 0; 383 } 384 385 /* 386 * pmap_update_pg: flush one page from the TLB (or flush the whole thing 387 * if hardware doesn't support one-page flushing) 388 */ 389 390 __inline static void __unused 391 pmap_update_pg(vaddr_t va) 392 { 393 invlpg(va); 394 } 395 396 /* 397 * pmap_page_protect: change the protection of all recorded mappings 398 * of a managed page 399 * 400 * => this function is a frontend for pmap_page_remove/pmap_clear_attrs 401 * => we only have to worry about making the page more protected. 402 * unprotecting a page is done on-demand at fault time. 403 */ 404 405 __inline static void __unused 406 pmap_page_protect(struct vm_page *pg, vm_prot_t prot) 407 { 408 if ((prot & VM_PROT_WRITE) == 0) { 409 if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) { 410 (void) pmap_clear_attrs(pg, PG_RW); 411 } else { 412 pmap_page_remove(pg); 413 } 414 } 415 } 416 417 /* 418 * pmap_pv_protect: change the protection of all recorded mappings 419 * of an unmanaged page 420 */ 421 422 __inline static void __unused 423 pmap_pv_protect(paddr_t pa, vm_prot_t prot) 424 { 425 if ((prot & VM_PROT_WRITE) == 0) { 426 if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) { 427 (void) pmap_pv_clear_attrs(pa, PG_RW); 428 } else { 429 pmap_pv_remove(pa); 430 } 431 } 432 } 433 434 /* 435 * pmap_protect: change the protection of pages in a pmap 436 * 437 * => this function is a frontend for pmap_remove/pmap_write_protect 438 * => we only have to worry about making the page more protected. 439 * unprotecting a page is done on-demand at fault time. 440 */ 441 442 __inline static void __unused 443 pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot) 444 { 445 if ((prot & VM_PROT_WRITE) == 0) { 446 if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) { 447 pmap_write_protect(pmap, sva, eva, prot); 448 } else { 449 pmap_remove(pmap, sva, eva); 450 } 451 } 452 } 453 454 /* 455 * various address inlines 456 * 457 * vtopte: return a pointer to the PTE mapping a VA, works only for 458 * user and PT addresses 459 * 460 * kvtopte: return a pointer to the PTE mapping a kernel VA 461 */ 462 463 #include <lib/libkern/libkern.h> 464 465 static __inline pt_entry_t * __unused 466 vtopte(vaddr_t va) 467 { 468 469 KASSERT(va < VM_MIN_KERNEL_ADDRESS); 470 471 return (PTE_BASE + pl1_i(va)); 472 } 473 474 static __inline pt_entry_t * __unused 475 kvtopte(vaddr_t va) 476 { 477 pd_entry_t *pde; 478 479 KASSERT(va >= VM_MIN_KERNEL_ADDRESS); 480 481 pde = L2_BASE + pl2_i(va); 482 if (*pde & PG_PS) 483 return ((pt_entry_t *)pde); 484 485 return (PTE_BASE + pl1_i(va)); 486 } 487 488 paddr_t vtophys(vaddr_t); 489 vaddr_t pmap_map(vaddr_t, paddr_t, paddr_t, vm_prot_t); 490 void pmap_cpu_init_late(struct cpu_info *); 491 bool sse2_idlezero_page(void *); 492 493 #ifdef XEN 494 #include <sys/bitops.h> 495 496 #define XPTE_MASK L1_FRAME 497 /* Selects the index of a PTE in (A)PTE_BASE */ 498 #define XPTE_SHIFT (L1_SHIFT - ilog2(sizeof(pt_entry_t))) 499 500 /* PTE access inline fuctions */ 501 502 /* 503 * Get the machine address of the pointed pte 504 * We use hardware MMU to get value so works only for levels 1-3 505 */ 506 507 static __inline paddr_t 508 xpmap_ptetomach(pt_entry_t *pte) 509 { 510 pt_entry_t *up_pte; 511 vaddr_t va = (vaddr_t) pte; 512 513 va = ((va & XPTE_MASK) >> XPTE_SHIFT) | (vaddr_t) PTE_BASE; 514 up_pte = (pt_entry_t *) va; 515 516 return (paddr_t) (((*up_pte) & PG_FRAME) + (((vaddr_t) pte) & (~PG_FRAME & ~VA_SIGN_MASK))); 517 } 518 519 /* Xen helpers to change bits of a pte */ 520 #define XPMAP_UPDATE_DIRECT 1 /* Update direct map entry flags too */ 521 522 paddr_t vtomach(vaddr_t); 523 #define vtomfn(va) (vtomach(va) >> PAGE_SHIFT) 524 #endif /* XEN */ 525 526 /* pmap functions with machine addresses */ 527 void pmap_kenter_ma(vaddr_t, paddr_t, vm_prot_t, u_int); 528 int pmap_enter_ma(struct pmap *, vaddr_t, paddr_t, paddr_t, 529 vm_prot_t, u_int, int); 530 bool pmap_extract_ma(pmap_t, vaddr_t, paddr_t *); 531 void pmap_free_ptps(struct vm_page *); 532 533 /* 534 * Hooks for the pool allocator. 535 */ 536 #define POOL_VTOPHYS(va) vtophys((vaddr_t) (va)) 537 538 #ifdef __HAVE_PCPU_AREA 539 extern struct pcpu_area *pcpuarea; 540 #define PDIR_SLOT_PCPU 384 541 #define PMAP_PCPU_BASE (VA_SIGN_NEG((PDIR_SLOT_PCPU * NBPD_L4))) 542 #endif 543 544 #ifdef __HAVE_DIRECT_MAP 545 546 extern vaddr_t pmap_direct_base; 547 extern vaddr_t pmap_direct_end; 548 549 #define L4_SLOT_DIRECT 456 550 #define PDIR_SLOT_DIRECT L4_SLOT_DIRECT 551 552 #define NL4_SLOT_DIRECT 32 553 554 #define PMAP_DIRECT_DEFAULT_BASE (VA_SIGN_NEG((L4_SLOT_DIRECT * NBPD_L4))) 555 556 #define PMAP_DIRECT_BASE pmap_direct_base 557 #define PMAP_DIRECT_END pmap_direct_end 558 559 #define PMAP_DIRECT_MAP(pa) ((vaddr_t)PMAP_DIRECT_BASE + (pa)) 560 #define PMAP_DIRECT_UNMAP(va) ((paddr_t)(va) - PMAP_DIRECT_BASE) 561 562 /* 563 * Alternate mapping hooks for pool pages. 564 */ 565 #define PMAP_MAP_POOLPAGE(pa) PMAP_DIRECT_MAP((pa)) 566 #define PMAP_UNMAP_POOLPAGE(va) PMAP_DIRECT_UNMAP((va)) 567 568 void pagezero(vaddr_t); 569 570 #endif /* __HAVE_DIRECT_MAP */ 571 572 #endif /* _KERNEL */ 573 574 #endif /* _X86_PMAP_H_ */ 575