Annotation of sys/arch/arm/include/pmap.h, Revision 1.1.1.1
/*	$OpenBSD: pmap.h,v 1.7 2007/04/21 19:26:04 miod Exp $	*/
/*	$NetBSD: pmap.h,v 1.76 2003/09/06 09:10:46 rearnsha Exp $	*/

/*
 * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe & Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994,1995 Mark Brinicombe.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_ARM32_PMAP_H_
#define	_ARM32_PMAP_H_

#ifdef _KERNEL

#include <arm/cpuconf.h>
#include <arm/pte.h>
#ifndef _LOCORE
#include <arm/cpufunc.h>
#endif

/*
 * A pmap describes a process's 4GB virtual address space.  This
 * virtual address space can be broken up into 4096 1MB regions, each
 * described by an L1 PTE in the L1 table.
 *
 * There is a line drawn at KERNEL_BASE.  Everything below that line
 * changes when the VM context is switched.  Everything above that line
 * is the same no matter which VM context is running.  This is achieved
 * by making the L1 PTEs for those slots above KERNEL_BASE reference
 * kernel L2 tables.
 *
 * The basic layout of the virtual address space thus looks like this:
 *
 *	0xffffffff
 *	.
 *	.
 *	.
 *	KERNEL_BASE
 *	--------------------
 *	.
 *	.
 *	.
 *	0x00000000
 */
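
/*
 * Illustrative sketch (assumptions: KERNEL_BASE is the split point
 * named above; L1_S_SHIFT, from <arm/pte.h>, is 20): each 1MB region
 * is one L1 slot, so a VA's slot number and its kernel/user side can
 * be derived directly.
 */
#if 0
	u_int l1slot = va >> L1_S_SHIFT;	/* one of 4096 1MB slots */
	int kernel_side = (va >= KERNEL_BASE);	/* shared kernel L2 tables */
#endif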

/*
 * The number of L2 descriptor tables which can be tracked by an l2_dtable.
 * A bucket size of 16 provides for 16MB of contiguous virtual address
 * space per l2_dtable.  Most processes will, therefore, require only two or
 * three of these to map their whole working set.
 */
#define	L2_BUCKET_LOG2	4
#define	L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)

/*
 * Given the above "L2-descriptors-per-l2_dtable" constant, the number
 * of l2_dtable structures required to track all possible page descriptors
 * mappable by an L1 translation table is given by the following constants:
 */
#define	L2_LOG2		((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
#define	L2_SIZE		(1 << L2_LOG2)
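
/*
 * Worked example of the arithmetic above: with L1_S_SHIFT == 20 and
 * L2_BUCKET_LOG2 == 4, L2_LOG2 == (32 - 20) - 4 == 8, so L2_SIZE == 256.
 * 256 l2_dtables * 16 L2 tables each == 4096, matching the number of
 * 1MB L1 slots in a 4GB address space.
 */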

#ifndef _LOCORE

struct l1_ttable;
struct l2_dtable;

/*
 * Track cache/tlb occupancy using the following structure
 */
union pmap_cache_state {
	struct {
		union {
			u_int8_t csu_cache_b[2];
			u_int16_t csu_cache;
		} cs_cache_u;

		union {
			u_int8_t csu_tlb_b[2];
			u_int16_t csu_tlb;
		} cs_tlb_u;
	} cs_s;
	u_int32_t cs_all;
};
#define	cs_cache_id	cs_s.cs_cache_u.csu_cache_b[0]
#define	cs_cache_d	cs_s.cs_cache_u.csu_cache_b[1]
#define	cs_cache	cs_s.cs_cache_u.csu_cache
#define	cs_tlb_id	cs_s.cs_tlb_u.csu_tlb_b[0]
#define	cs_tlb_d	cs_s.cs_tlb_u.csu_tlb_b[1]
#define	cs_tlb		cs_s.cs_tlb_u.csu_tlb

/*
 * Assigned to cs_all to force cacheops to work for a particular pmap
 */
#define	PMAP_CACHE_STATE_ALL	0xffffffffu

/*
 * This structure is used by machine-dependent code to describe
 * static mappings of devices, created at bootstrap time.
 */
struct pmap_devmap {
	vaddr_t		pd_va;		/* virtual address */
	paddr_t		pd_pa;		/* physical address */
	psize_t		pd_size;	/* size of region */
	vm_prot_t	pd_prot;	/* protection code */
	int		pd_cache;	/* cache attributes */
};
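
/*
 * Illustrative sketch of a board's static devmap table, assuming a
 * hypothetical device at physical 0x10000000 mapped at virtual
 * 0xfd000000; the table ends with an all-zero entry (pd_size == 0)
 * and is handed to pmap_devmap_bootstrap() early in bootstrap.
 */
#if 0
static const struct pmap_devmap board_devmap[] = {
	{
		0xfd000000,		/* pd_va (hypothetical) */
		0x10000000,		/* pd_pa (hypothetical) */
		0x00100000,		/* pd_size: 1MB region */
		VM_PROT_READ | VM_PROT_WRITE,
		PTE_NOCACHE		/* device memory: uncached */
	},
	{ 0, 0, 0, 0, 0 }		/* terminator */
};

	/* ... later, with the L1 table's virtual address in hand: */
	pmap_devmap_bootstrap((vaddr_t)l1pt, board_devmap);
#endif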

/*
 * The pmap structure itself
 */
struct pmap {
	u_int8_t		pm_domain;
	boolean_t		pm_remove_all;
	struct l1_ttable	*pm_l1;
	union pmap_cache_state	pm_cstate;
	u_int			pm_refs;
	simple_lock_data_t	pm_lock;
	struct l2_dtable	*pm_l2[L2_SIZE];
	struct pmap_statistics	pm_stats;
	LIST_ENTRY(pmap)	pm_list;
};
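
/*
 * Sketch of how pm_l2[] is indexed (the authoritative macros live in
 * the pmap implementation; "pm" and "va" here are illustrative): the
 * L1 slot number of a VA selects an l2_dtable, and the low
 * L2_BUCKET_LOG2 bits of the slot select a bucket within it.
 */
#if 0
	u_int l1idx = va >> L1_S_SHIFT;		/* 0..4095 */
	struct l2_dtable *l2 = pm->pm_l2[l1idx >> L2_BUCKET_LOG2];
	u_int bucket = l1idx & (L2_BUCKET_SIZE - 1);	/* 0..15 */
#endif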

typedef struct pmap *pmap_t;

/*
 * Physical / virtual address structure.  In a number of places (particularly
 * during bootstrapping) we need to keep track of the physical and virtual
 * addresses of various pages.
 */
typedef struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	paddr_t pv_pa;
	vaddr_t pv_va;
} pv_addr_t;

/*
 * Determine various modes for PTEs (user vs. kernel, cacheable
 * vs. non-cacheable).
 */
#define	PTE_KERNEL	0
#define	PTE_USER	1
#define	PTE_NOCACHE	0
#define	PTE_CACHE	1
#define	PTE_PAGETABLE	2

/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
 * page.  PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
 * pv_entry's for each page.  They live in the same "namespace" so
 * that we can clear multiple attributes at a time.
 *
 * Note the "non-cacheable" flag generally means the page has
 * multiple mappings in a given address space.
 */
#define	PVF_MOD		0x01		/* page is modified */
#define	PVF_REF		0x02		/* page is referenced */
#define	PVF_WIRED	0x04		/* mapping is wired */
#define	PVF_WRITE	0x08		/* mapping is writable */
#define	PVF_EXEC	0x10		/* mapping is executable */
#define	PVF_UNC		0x20		/* mapping is 'user' non-cacheable */
#define	PVF_KNC		0x40		/* mapping is 'kernel' non-cacheable */
#define	PVF_NC		(PVF_UNC|PVF_KNC)

/*
 * Commonly referenced structures
 */
extern struct pmap	kernel_pmap_store;
extern int		pmap_debug_level; /* Only exists if PMAP_DEBUG */

/*
 * Macros that we need to export
 */
#define	pmap_kernel()			(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	pmap_is_modified(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_MOD) != 0)
#define	pmap_is_referenced(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_REF) != 0)

#define	pmap_copy(dp, sp, da, l, sa)	/* nothing */

#define	pmap_phys_address(ppn)		(ptoa(ppn))

#define	pmap_proc_iflush(p, va, len)	/* nothing */
#define	pmap_unuse_final(p)		/* nothing */

/*
 * Functions that we need to export
 */
void	pmap_procwr(struct proc *, vaddr_t, int);
void	pmap_remove_all(pmap_t);
boolean_t	pmap_extract(pmap_t, vaddr_t, paddr_t *);

#define	PMAP_NEED_PROCWR
#define	PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */

/* Functions we use internally. */
void	pmap_bootstrap(pd_entry_t *, vaddr_t, vaddr_t);

int	pmap_fault_fixup(pmap_t, vaddr_t, vm_prot_t, int);
boolean_t	pmap_get_pde_pte(pmap_t, vaddr_t, pd_entry_t **, pt_entry_t **);
boolean_t	pmap_get_pde(pmap_t, vaddr_t, pd_entry_t **);
void	pmap_set_pcb_pagedir(pmap_t, struct pcb *);

void	pmap_debug(int);
void	pmap_postinit(void);

void	vector_page_setprot(int);

const struct pmap_devmap *pmap_devmap_find_pa(paddr_t, psize_t);
const struct pmap_devmap *pmap_devmap_find_va(vaddr_t, vsize_t);

/* Bootstrapping routines. */
void	pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
void	pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
vsize_t	pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
void	pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);
void	pmap_devmap_bootstrap(vaddr_t, const struct pmap_devmap *);
void	pmap_devmap_register(const struct pmap_devmap *);

/*
 * Special page zero routine for use by the idle loop (no cache cleans).
 */
boolean_t	pmap_pageidlezero(struct vm_page *);
#define	PMAP_PAGEIDLEZERO(pg)	pmap_pageidlezero((pg))

/*
 * The current top of kernel VM
 */
extern vaddr_t	pmap_curmaxkvaddr;

/*
 * Useful macros and constants
 */

/* Virtual address to page table entry */
static __inline pt_entry_t *
vtopte(vaddr_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == FALSE)
		return (NULL);
	return (ptep);
}

/*
 * The new pmap ensures that page-tables are always mapping Write-Thru.
 * Thus, on some platforms we can run fast and loose and avoid syncing PTEs
 * on every change.
 *
 * Unfortunately, not all CPUs have a write-through cache mode.  So we
 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
 * and if there is the chance for PTE syncs to be needed, we define
 * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
 * the code.
 */
extern int pmap_needs_pte_sync;

/*
 * StrongARM SA-1 caches do not have a write-through mode.  So, on these,
 * we need to do PTE syncs.  If only SA-1 is configured, then evaluate
 * this at compile time.
 */
#if (ARM_MMU_SA1 == 1) && (ARM_NMMUS == 1)
#define	PMAP_NEEDS_PTE_SYNC	1
#define	PMAP_INCLUDE_PTE_SYNC
#elif (ARM_MMU_SA1 == 0)
#define	PMAP_NEEDS_PTE_SYNC	0
#endif

/*
 * Provide a fallback in case we were not able to determine it at
 * compile-time.
 */
#ifndef PMAP_NEEDS_PTE_SYNC
#define	PMAP_NEEDS_PTE_SYNC	pmap_needs_pte_sync
#define	PMAP_INCLUDE_PTE_SYNC
#endif
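
/*
 * Summary of how the above resolves: with only SA-1 configured,
 * PMAP_NEEDS_PTE_SYNC is the constant 1; with no SA-1 at all it is the
 * constant 0 (so the sync code can be optimized away); in a mixed-MMU
 * kernel it falls back to the run-time variable pmap_needs_pte_sync,
 * and PMAP_INCLUDE_PTE_SYNC keeps the sync code available just in case.
 */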

#define	PTE_SYNC(pte)							\
do {									\
	if (PMAP_NEEDS_PTE_SYNC)					\
		cpu_dcache_wb_range((vaddr_t)(pte), sizeof(pt_entry_t));\
} while (/*CONSTCOND*/0)

#define	PTE_SYNC_RANGE(pte, cnt)					\
do {									\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		cpu_dcache_wb_range((vaddr_t)(pte),			\
		    (cnt) << 2); /* * sizeof(pt_entry_t) */		\
	}								\
} while (/*CONSTCOND*/0)
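
/*
 * Typical usage sketch: after storing a new PTE value, write the cache
 * line holding it back to memory so a non-snooping table walk sees the
 * update.  The variable names here are illustrative.
 */
#if 0
	pt_entry_t *ptep = vtopte(va);

	*ptep = npte;		/* npte built elsewhere */
	PTE_SYNC(ptep);
#endif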

#define	l1pte_valid(pde)	((pde) != 0)
#define	l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define	l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
#define	l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)

#define	l2pte_index(v)		(((v) & L2_ADDR_BITS) >> L2_S_SHIFT)
#define	l2pte_valid(pte)	((pte) != 0)
#define	l2pte_pa(pte)		((pte) & L2_S_FRAME)
#define	l2pte_minidata(pte)	(((pte) & \
				 (L2_B | L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))\
				 == (L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))
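
/*
 * Sketch of a kernel VA-to-PA lookup using the helpers above, assuming
 * the VA is backed by a small (4KB) page; pmap_extract() is the real,
 * general-purpose interface for this.
 */
#if 0
	pt_entry_t *ptep = vtopte(va);
	paddr_t pa;

	if (ptep != NULL && l2pte_valid(*ptep))
		pa = l2pte_pa(*ptep) | (va & L2_S_OFFSET);
#endif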

/* L1 and L2 page table macros */
#define	pmap_pde_v(pde)		l1pte_valid(*(pde))
#define	pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define	pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define	pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))

#define	pmap_pte_v(pte)		l2pte_valid(*(pte))
#define	pmap_pte_pa(pte)	l2pte_pa(*(pte))

/* Size of the kernel part of the L1 page table */
#define	KERNEL_PD_SIZE	\
	(L1_TABLE_SIZE - (KERNEL_BASE >> L1_S_SHIFT) * sizeof(pd_entry_t))
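
/*
 * Worked example, assuming the common KERNEL_BASE of 0xc0000000:
 * L1_TABLE_SIZE is 16KB (4096 4-byte entries), the user part covers
 * slots 0..3071 (0xc0000000 >> 20 == 3072), so KERNEL_PD_SIZE is
 * 16384 - 3072 * 4 == 4096 bytes, i.e. the top 1024 L1 slots.
 */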

/************************* ARM MMU configuration *****************************/

#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
void	pmap_copy_page_generic(struct vm_page *, struct vm_page *);
void	pmap_zero_page_generic(struct vm_page *);

void	pmap_pte_init_generic(void);
#if defined(CPU_ARM8)
void	pmap_pte_init_arm8(void);
#endif
#if defined(CPU_ARM9)
void	pmap_pte_init_arm9(void);
#endif /* CPU_ARM9 */
#if defined(CPU_ARM10)
void	pmap_pte_init_arm10(void);
#endif /* CPU_ARM10 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */

#if ARM_MMU_SA1 == 1
void	pmap_pte_init_sa1(void);
#endif /* ARM_MMU_SA1 == 1 */

#if ARM_MMU_XSCALE == 1
void	pmap_copy_page_xscale(struct vm_page *, struct vm_page *);
void	pmap_zero_page_xscale(struct vm_page *);

void	pmap_pte_init_xscale(void);

void	xscale_setup_minidata(vaddr_t, vaddr_t, paddr_t);

#define	PMAP_UAREA(va)		pmap_uarea(va)
void	pmap_uarea(vaddr_t);
#endif /* ARM_MMU_XSCALE == 1 */

extern pt_entry_t		pte_l1_s_cache_mode;
extern pt_entry_t		pte_l1_s_cache_mask;

extern pt_entry_t		pte_l2_l_cache_mode;
extern pt_entry_t		pte_l2_l_cache_mask;

extern pt_entry_t		pte_l2_s_cache_mode;
extern pt_entry_t		pte_l2_s_cache_mask;

extern pt_entry_t		pte_l1_s_cache_mode_pt;
extern pt_entry_t		pte_l2_l_cache_mode_pt;
extern pt_entry_t		pte_l2_s_cache_mode_pt;

extern pt_entry_t		pte_l2_s_prot_u;
extern pt_entry_t		pte_l2_s_prot_w;
extern pt_entry_t		pte_l2_s_prot_mask;

extern pt_entry_t		pte_l1_s_proto;
extern pt_entry_t		pte_l1_c_proto;
extern pt_entry_t		pte_l2_s_proto;

extern void (*pmap_copy_page_func)(struct vm_page *, struct vm_page *);
extern void (*pmap_zero_page_func)(struct vm_page *);

#endif /* !_LOCORE */

/*****************************************************************************/

/*
 * Tell MI code that the cache is virtually-indexed *and* virtually-tagged.
 */
#define	PMAP_CACHE_VIVT

/*
 * Definitions for MMU domains
 */
#define	PMAP_DOMAINS		15	/* 15 'user' domains (0-14) */
#define	PMAP_DOMAIN_KERNEL	15	/* The kernel uses domain #15 */

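/*
 * Background sketch: the MMU's domain access control register (DACR)
 * holds a 2-bit field per domain, and each user pmap records its
 * assigned domain in pm_domain.  Granting "client" access (encoding
 * 0x01) to the kernel domain and a pmap's own domain would look
 * roughly like this; treat the exact usage as an assumption here.
 */
#if 0
	u_int dacr = (0x01 << (PMAP_DOMAIN_KERNEL * 2)) |
	    (0x01 << (pm->pm_domain * 2));	/* 0x01 == client access */
#endif
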
/*
 * These macros define the various bit masks in the PTE.
 *
 * We use these macros since we use different bits on different processor
 * models.
 */
#define	L1_S_PROT_U		(L1_S_AP(AP_U))
#define	L1_S_PROT_W		(L1_S_AP(AP_W))
#define	L1_S_PROT_MASK		(L1_S_PROT_U|L1_S_PROT_W)

#define	L1_S_CACHE_MASK_generic	(L1_S_B|L1_S_C)
#define	L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X))

#define	L2_L_PROT_U		(L2_AP(AP_U))
#define	L2_L_PROT_W		(L2_AP(AP_W))
#define	L2_L_PROT_MASK		(L2_L_PROT_U|L2_L_PROT_W)

#define	L2_L_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_L_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X))

#define	L2_S_PROT_U_generic	(L2_AP(AP_U))
#define	L2_S_PROT_W_generic	(L2_AP(AP_W))
#define	L2_S_PROT_MASK_generic	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_PROT_U_xscale	(L2_AP0(AP_U))
#define	L2_S_PROT_W_xscale	(L2_AP0(AP_W))
#define	L2_S_PROT_MASK_xscale	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_S_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_T_TEX(TEX_XSCALE_X))

#define	L1_S_PROTO_generic	(L1_TYPE_S | L1_S_IMP)
#define	L1_S_PROTO_xscale	(L1_TYPE_S)

#define	L1_C_PROTO_generic	(L1_TYPE_C | L1_C_IMP2)
#define	L1_C_PROTO_xscale	(L1_TYPE_C)

#define	L2_L_PROTO		(L2_TYPE_L)

#define	L2_S_PROTO_generic	(L2_TYPE_S)
#define	L2_S_PROTO_xscale	(L2_TYPE_XSCALE_XS)

/*
 * User-visible names for the ones that vary with MMU class.
 */

#if ARM_NMMUS > 1
/* More than one MMU class configured; use variables. */
#define	L2_S_PROT_U		pte_l2_s_prot_u
#define	L2_S_PROT_W		pte_l2_s_prot_w
#define	L2_S_PROT_MASK		pte_l2_s_prot_mask

#define	L1_S_CACHE_MASK		pte_l1_s_cache_mask
#define	L2_L_CACHE_MASK		pte_l2_l_cache_mask
#define	L2_S_CACHE_MASK		pte_l2_s_cache_mask

#define	L1_S_PROTO		pte_l1_s_proto
#define	L1_C_PROTO		pte_l1_c_proto
#define	L2_S_PROTO		pte_l2_s_proto

#define	pmap_copy_page(s, d)	(*pmap_copy_page_func)((s), (d))
#define	pmap_zero_page(d)	(*pmap_zero_page_func)((d))
#elif (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#elif ARM_MMU_XSCALE == 1
#define	L2_S_PROT_U		L2_S_PROT_U_xscale
#define	L2_S_PROT_W		L2_S_PROT_W_xscale
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_xscale

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_xscale
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_xscale
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_xscale

#define	L1_S_PROTO		L1_S_PROTO_xscale
#define	L1_C_PROTO		L1_C_PROTO_xscale
#define	L2_S_PROTO		L2_S_PROTO_xscale

#define	pmap_copy_page(s, d)	pmap_copy_page_xscale((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_xscale((d))
#endif /* ARM_NMMUS > 1 */

/*
 * These macros return various bits based on kernel/user and protection.
 * Note that the compiler will usually fold these at compile time.
 */
#define	L1_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0))

#define	L2_L_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0))

#define	L2_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0))
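
/*
 * Sketch of assembling a complete small-page PTE from the pieces
 * defined in this header: type/proto bits, the physical frame, access
 * permissions, and the cache mode.  Illustrative only; "pa" is assumed
 * to be the target physical address.
 */
#if 0
	pt_entry_t npte;

	npte = L2_S_PROTO | (pa & L2_S_FRAME) |
	    L2_S_PROT(PTE_USER, VM_PROT_READ | VM_PROT_WRITE) |
	    pte_l2_s_cache_mode;
#endif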

/*
 * Macros to test if a mapping is mappable with an L1 Section mapping
 * or an L2 Large Page mapping.
 */
#define	L1_S_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)

#define	L2_L_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
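
/*
 * Sketch of how a bootstrap mapper such as pmap_map_chunk() can use
 * these predicates: prefer 1MB sections, then 64KB large pages, and
 * fall back to 4KB small pages.  Hypothetical loop body.
 */
#if 0
	while (resid > 0) {
		if (L1_S_MAPPABLE_P(va, pa, resid))
			size = L1_S_SIZE;	/* 1MB section */
		else if (L2_L_MAPPABLE_P(va, pa, resid))
			size = L2_L_SIZE;	/* 64KB large page */
		else
			size = L2_S_SIZE;	/* 4KB small page */
		/* ...install the mapping, then advance... */
		va += size; pa += size; resid -= size;
	}
#endif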

#endif /* _KERNEL */

#endif /* _ARM32_PMAP_H_ */