/*	$OpenBSD: uvm_page.c,v 1.61 2007/06/18 21:51:15 pedro Exp $	*/
/*	$NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.c   8.3 (Berkeley) 3/21/94
 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_page.c: page ops.
 */

#define UVM_PAGE		/* pull in uvm_page.h functions */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/sched.h>
#include <sys/kernel.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>

/*
 * global vars... XXXCDC: move to uvm. structure.
 */

/*
 * physical memory config is stored in vm_physmem.
 */

struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */

/*
 * Some supported CPUs in a given architecture don't support all
 * of the things necessary to do idle page zero'ing efficiently.
 * We therefore provide a way to disable it from machdep code here.
 */

/*
 * XXX disabled until we can find a way to do this without causing
 * problems for either cpu caches or DMA latency.
 */
boolean_t vm_page_zero_enable = FALSE;

/*
 * local variables
 */

/*
 * these variables record the values returned by vm_page_bootstrap,
 * for debugging purposes.  The implementation of uvm_pageboot_alloc
 * and pmap_startup here also uses them internally.
 */

static vaddr_t      virtual_space_start;
static vaddr_t      virtual_space_end;

/*
 * we use a hash table with only one bucket during bootup.  we will
 * later rehash (resize) the hash table once the allocator is ready.
 * we statically allocate the one bootstrap bucket below...
 */

static struct pglist uvm_bootbucket;

/*
 * History
 */
UVMHIST_DECL(pghist);

/*
 * local prototypes
 */

static void uvm_pageinsert(struct vm_page *);
static void uvm_pageremove(struct vm_page *);

/*
 * inline functions
 */

/*
 * uvm_pageinsert: insert a page in the object and the hash table
 *
 * => caller must lock object
 * => caller must lock page queues
 * => caller should have already set pg's object and offset pointers
 *    and bumped the version counter
 */

__inline static void
uvm_pageinsert(struct vm_page *pg)
{
	struct pglist *buck;
	int s;
	UVMHIST_FUNC("uvm_pageinsert"); UVMHIST_CALLED(pghist);

	KASSERT((pg->pg_flags & PG_TABLED) == 0);
	buck = &uvm.page_hash[uvm_pagehash(pg->uobject, pg->offset)];
	s = splvm();
	simple_lock(&uvm.hashlock);
	TAILQ_INSERT_TAIL(buck, pg, hashq);	/* put in hash */
	simple_unlock(&uvm.hashlock);
	splx(s);

	TAILQ_INSERT_TAIL(&pg->uobject->memq, pg, listq); /* put in object */
	atomic_setbits_int(&pg->pg_flags, PG_TABLED);
	pg->uobject->uo_npages++;
}

/*
 * uvm_pageremove: remove page from object and hash
 *
 * => caller must lock object
 * => caller must lock page queues
 */

static __inline void
uvm_pageremove(struct vm_page *pg)
{
	struct pglist *buck;
	int s;
	UVMHIST_FUNC("uvm_pageremove"); UVMHIST_CALLED(pghist);

	KASSERT(pg->pg_flags & PG_TABLED);
	buck = &uvm.page_hash[uvm_pagehash(pg->uobject, pg->offset)];
	s = splvm();
	simple_lock(&uvm.hashlock);
	TAILQ_REMOVE(buck, pg, hashq);
	simple_unlock(&uvm.hashlock);
	splx(s);

#ifdef UBC
	if (pg->uobject->pgops == &uvm_vnodeops) {
		uvm_pgcnt_vnode--;
	}
#endif

	/* object should be locked */
	TAILQ_REMOVE(&pg->uobject->memq, pg, listq);

	atomic_clearbits_int(&pg->pg_flags, PG_TABLED);
	pg->uobject->uo_npages--;
	pg->uobject = NULL;
	pg->pg_version++;
}
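
/*
 * Illustrative sketch (not part of the original file): the <obj,offset>
 * hash kept up to date by uvm_pageinsert()/uvm_pageremove() above is
 * what makes page lookup (e.g. uvm_pagelookup()) cheap.  A minimal
 * lookup over a bucket chain, under the same locking rules, would look
 * roughly like this (example_pagelookup is hypothetical):
 *
 *	struct vm_page *
 *	example_pagelookup(struct uvm_object *obj, voff_t off)
 *	{
 *		struct vm_page *pg;
 *		struct pglist *buck;
 *		int s;
 *
 *		buck = &uvm.page_hash[uvm_pagehash(obj, off)];
 *		s = splvm();
 *		simple_lock(&uvm.hashlock);
 *		TAILQ_FOREACH(pg, buck, hashq) {
 *			if (pg->uobject == obj && pg->offset == off)
 *				break;
 *		}
 *		simple_unlock(&uvm.hashlock);
 *		splx(s);
 *		return (pg);
 *	}
 */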

/*
 * uvm_page_init: init the page system.  called from uvm_init().
 *
 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
 */

void
uvm_page_init(kvm_startp, kvm_endp)
	vaddr_t *kvm_startp, *kvm_endp;
{
	vsize_t freepages, pagecount, n;
	vm_page_t pagearray;
	int lcv, i;
	paddr_t paddr;
#if defined(UVMHIST)
	static struct uvm_history_ent pghistbuf[100];
#endif

	UVMHIST_FUNC("uvm_page_init");
	UVMHIST_INIT_STATIC(pghist, pghistbuf);
	UVMHIST_CALLED(pghist);

	/*
	 * init the page queues and page queue locks
	 */

	for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
		for (i = 0; i < PGFL_NQUEUES; i++)
			TAILQ_INIT(&uvm.page_free[lcv].pgfl_queues[i]);
	}
	TAILQ_INIT(&uvm.page_active);
	TAILQ_INIT(&uvm.page_inactive_swp);
	TAILQ_INIT(&uvm.page_inactive_obj);
	simple_lock_init(&uvm.pageqlock);
	simple_lock_init(&uvm.fpageqlock);

	/*
	 * init the <obj,offset> => <page> hash table.  for now
	 * we just have one bucket (the bootstrap bucket).  later on we
	 * will allocate new buckets as we dynamically resize the hash table.
	 */

	uvm.page_nhash = 1;			/* 1 bucket */
	uvm.page_hashmask = 0;			/* mask for hash function */
	uvm.page_hash = &uvm_bootbucket;	/* install bootstrap bucket */
	TAILQ_INIT(uvm.page_hash);		/* init hash table */
	simple_lock_init(&uvm.hashlock);	/* init hash table lock */

	/*
	 * allocate vm_page structures.
	 */

	/*
	 * sanity check:
	 * before calling this function the MD code is expected to register
	 * some free RAM with the uvm_page_physload() function.  our job
	 * now is to allocate vm_page structures for this memory.
	 */

	if (vm_nphysseg == 0)
		panic("uvm_page_bootstrap: no memory pre-allocated");

	/*
	 * first calculate the number of free pages...
	 *
	 * note that we use start/end rather than avail_start/avail_end.
	 * this allows us to allocate extra vm_page structures in case we
	 * want to return some memory to the pool after booting.
	 */

	freepages = 0;
	for (lcv = 0; lcv < vm_nphysseg; lcv++)
		freepages += (vm_physmem[lcv].end - vm_physmem[lcv].start);

	/*
	 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
	 * use.  for each page of memory we use we need a vm_page structure.
	 * thus, the total number of pages we can use is the total size of
	 * the memory divided by the PAGE_SIZE plus the size of the vm_page
	 * structure.  we add one to freepages as a fudge factor to avoid
	 * truncation errors (since we can only allocate in terms of whole
	 * pages).
	 */
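
	/*
	 * Worked example (illustrative only; exact sizes are
	 * machine-dependent): with 4096-byte pages and a 128-byte
	 * struct vm_page, freepages = 16384 gives
	 *
	 *	pagecount = (16385 << 12) / (4096 + 128) = 15887
	 *
	 * i.e. roughly 3% of the pages end up consumed by their own
	 * vm_page structures.
	 */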

	pagecount = (((paddr_t)freepages + 1) << PAGE_SHIFT) /
	    (PAGE_SIZE + sizeof(struct vm_page));
	pagearray = (vm_page_t)uvm_pageboot_alloc(pagecount *
	    sizeof(struct vm_page));
	memset(pagearray, 0, pagecount * sizeof(struct vm_page));

	/*
	 * init the vm_page structures and put them in the correct place.
	 */

	for (lcv = 0; lcv < vm_nphysseg; lcv++) {
		n = vm_physmem[lcv].end - vm_physmem[lcv].start;
		if (n > pagecount) {
			printf("uvm_page_init: lost %ld page(s) in init\n",
			    (long)(n - pagecount));
			panic("uvm_page_init");	/* XXXCDC: shouldn't happen? */
			/* n = pagecount; */
		}

		/* set up page array pointers */
		vm_physmem[lcv].pgs = pagearray;
		pagearray += n;
		pagecount -= n;
		vm_physmem[lcv].lastpg = vm_physmem[lcv].pgs + (n - 1);

		/* init and free vm_pages (we've already zeroed them) */
		paddr = ptoa(vm_physmem[lcv].start);
		for (i = 0; i < n; i++, paddr += PAGE_SIZE) {
			vm_physmem[lcv].pgs[i].phys_addr = paddr;
#ifdef __HAVE_VM_PAGE_MD
			VM_MDPAGE_INIT(&vm_physmem[lcv].pgs[i]);
#endif
			if (atop(paddr) >= vm_physmem[lcv].avail_start &&
			    atop(paddr) <= vm_physmem[lcv].avail_end) {
				uvmexp.npages++;
				/* add page to free pool */
				uvm_pagefree(&vm_physmem[lcv].pgs[i]);
			}
		}
	}

	/*
	 * pass up the values of virtual_space_start and
	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
	 * layers of the VM.
	 */

	*kvm_startp = round_page(virtual_space_start);
	*kvm_endp = trunc_page(virtual_space_end);

	/*
	 * init locks for kernel threads
	 */

	simple_lock_init(&uvm.pagedaemon_lock);
	simple_lock_init(&uvm.aiodoned_lock);

	/*
	 * init reserve thresholds
	 * XXXCDC - values may need adjusting
	 */
	uvmexp.reserve_pagedaemon = 4;
	uvmexp.reserve_kernel = 6;
	uvmexp.anonminpct = 10;
	uvmexp.vnodeminpct = 10;
	uvmexp.vtextminpct = 5;
	uvmexp.anonmin = uvmexp.anonminpct * 256 / 100;
	uvmexp.vnodemin = uvmexp.vnodeminpct * 256 / 100;
	uvmexp.vtextmin = uvmexp.vtextminpct * 256 / 100;

	/*
	 * determine if we should zero pages in the idle loop.
	 */

	uvm.page_idle_zero = vm_page_zero_enable;

	/*
	 * done!
	 */

	uvm.page_init_done = TRUE;
}

/*
 * uvm_setpagesize: set the page size
 *
 * => sets page_shift and page_mask from uvmexp.pagesize.
 */

void
uvm_setpagesize()
{
	if (uvmexp.pagesize == 0)
		uvmexp.pagesize = DEFAULT_PAGE_SIZE;
	uvmexp.pagemask = uvmexp.pagesize - 1;
	if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
		panic("uvm_setpagesize: page size not a power of two");
	for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
		if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
			break;
}
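
/*
 * Illustrative note (not part of the original file): for the common
 * 4096-byte page size the loop above terminates with
 *
 *	uvmexp.pagesize  = 4096 (0x1000)
 *	uvmexp.pagemask  = 4095 (0x0fff)
 *	uvmexp.pageshift = 12
 *
 * and the power-of-two check works because a power of two and that
 * value minus one share no set bits.
 */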

/*
 * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
 */

vaddr_t
uvm_pageboot_alloc(size)
	vsize_t size;
{
#if defined(PMAP_STEAL_MEMORY)
	vaddr_t addr;

	/*
	 * defer bootstrap allocation to MD code (it may want to allocate
	 * from a direct-mapped segment).  pmap_steal_memory should round
	 * off virtual_space_start/virtual_space_end.
	 */

	addr = pmap_steal_memory(size, &virtual_space_start,
	    &virtual_space_end);

	return(addr);

#else /* !PMAP_STEAL_MEMORY */

	static boolean_t initialized = FALSE;
	vaddr_t addr, vaddr;
	paddr_t paddr;

	/* round to page size */
	size = round_page(size);

	/*
	 * on first call to this function, initialize ourselves.
	 */
	if (initialized == FALSE) {
		pmap_virtual_space(&virtual_space_start, &virtual_space_end);

		/* round it the way we like it */
		virtual_space_start = round_page(virtual_space_start);
		virtual_space_end = trunc_page(virtual_space_end);

		initialized = TRUE;
	}

	/*
	 * allocate virtual memory for this request
	 */
	if (virtual_space_start == virtual_space_end ||
	    (virtual_space_end - virtual_space_start) < size)
		panic("uvm_pageboot_alloc: out of virtual space");

	addr = virtual_space_start;

#ifdef PMAP_GROWKERNEL
	/*
	 * If the kernel pmap can't map the requested space,
	 * then allocate more resources for it.
	 */
	if (uvm_maxkaddr < (addr + size)) {
		uvm_maxkaddr = pmap_growkernel(addr + size);
		if (uvm_maxkaddr < (addr + size))
			panic("uvm_pageboot_alloc: pmap_growkernel() failed");
	}
#endif

	virtual_space_start += size;

	/*
	 * allocate and mapin physical pages to back new virtual pages
	 */

	for (vaddr = round_page(addr); vaddr < addr + size;
	    vaddr += PAGE_SIZE) {

		if (!uvm_page_physget(&paddr))
			panic("uvm_pageboot_alloc: out of memory");

		/*
		 * Note this memory is no longer managed, so using
		 * pmap_kenter is safe.
		 */
		pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE);
	}
	pmap_update(pmap_kernel());
	return(addr);
#endif /* PMAP_STEAL_MEMORY */
}
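
/*
 * Illustrative sketch (not part of the original file): uvm_page_init()
 * above is the typical caller of uvm_pageboot_alloc().  Any other
 * early-boot consumer would follow the same pattern, before
 * uvm.page_init_done is set (the caller shown is hypothetical):
 *
 *	vaddr_t va;
 *
 *	va = uvm_pageboot_alloc(4 * PAGE_SIZE);
 *	memset((void *)va, 0, 4 * PAGE_SIZE);
 *
 * The memory is permanently stolen: there is no matching free routine,
 * so this is only suitable for structures that live forever.
 */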

#if !defined(PMAP_STEAL_MEMORY)
/*
 * uvm_page_physget: "steal" one page from the vm_physmem structure.
 *
 * => attempt to allocate it off the end of a segment in which the "avail"
 *    values match the start/end values.  if we can't do that, then we
 *    will advance both values (making them equal, and removing some
 *    vm_page structures from the non-avail area).
 * => return false if out of memory.
 */

/* subroutine: try to allocate from memory chunks on the specified freelist */
static boolean_t uvm_page_physget_freelist(paddr_t *, int);

static boolean_t
uvm_page_physget_freelist(paddrp, freelist)
	paddr_t *paddrp;
	int freelist;
{
	int lcv, x;
	UVMHIST_FUNC("uvm_page_physget_freelist"); UVMHIST_CALLED(pghist);

	/* pass 1: try allocating from a matching end */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
	(VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	for (lcv = vm_nphysseg - 1; lcv >= 0; lcv--)
#else
	for (lcv = 0; lcv < vm_nphysseg; lcv++)
#endif
	{

		if (uvm.page_init_done == TRUE)
			panic("uvm_page_physget: called _after_ bootstrap");

		if (vm_physmem[lcv].free_list != freelist)
			continue;

		/* try from front */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].start &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_start);
			vm_physmem[lcv].avail_start++;
			vm_physmem[lcv].start++;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_start ==
			    vm_physmem[lcv].end) {
				if (vm_nphysseg == 1)
					panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv; x < vm_nphysseg; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}

		/* try from rear */
		if (vm_physmem[lcv].avail_end == vm_physmem[lcv].end &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_end - 1);
			vm_physmem[lcv].avail_end--;
			vm_physmem[lcv].end--;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_end ==
			    vm_physmem[lcv].start) {
				if (vm_nphysseg == 1)
					panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv; x < vm_nphysseg; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}
	}

	/* pass 2: forget about matching ends, just allocate something */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
	(VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	for (lcv = vm_nphysseg - 1; lcv >= 0; lcv--)
#else
	for (lcv = 0; lcv < vm_nphysseg; lcv++)
#endif
	{

		/* any room in this bank? */
		if (vm_physmem[lcv].avail_start >= vm_physmem[lcv].avail_end)
			continue;	/* nope */

		*paddrp = ptoa(vm_physmem[lcv].avail_start);
		vm_physmem[lcv].avail_start++;
		/* truncate! */
		vm_physmem[lcv].start = vm_physmem[lcv].avail_start;

		/* nothing left?   nuke it */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].end) {
			if (vm_nphysseg == 1)
				panic("uvm_page_physget: out of memory!");
			vm_nphysseg--;
			for (x = lcv; x < vm_nphysseg; x++)
				/* structure copy */
				vm_physmem[x] = vm_physmem[x+1];
		}
		return (TRUE);
	}

	return (FALSE);		/* whoops! */
}

boolean_t
uvm_page_physget(paddrp)
	paddr_t *paddrp;
{
	int i;
	UVMHIST_FUNC("uvm_page_physget"); UVMHIST_CALLED(pghist);

	/* try in the order of freelist preference */
	for (i = 0; i < VM_NFREELIST; i++)
		if (uvm_page_physget_freelist(paddrp, i) == TRUE)
			return (TRUE);
	return (FALSE);
}
#endif /* PMAP_STEAL_MEMORY */

/*
 * uvm_page_physload: load physical memory into VM system
 *
 * => all args are PFs
 * => all pages in start/end get vm_page structures
 * => areas marked by avail_start/avail_end get added to the free page pool
 * => we are limited to VM_PHYSSEG_MAX physical memory segments
 */
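
/*
 * Illustrative sketch (not part of the original file): machine-dependent
 * startup code calls this once per usable RAM range before uvm_page_init()
 * runs.  With hypothetical byte addresses avail_start/avail_end from the
 * MD memory probe, a typical call is
 *
 *	uvm_page_physload(atop(avail_start), atop(avail_end),
 *	    atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT);
 *
 * i.e. everything converted to page frame numbers by atop().  Passing a
 * wider start/end than avail_start/avail_end sets up vm_page structures
 * for pages that are never put on the free list (e.g. kernel text).
 */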

void
uvm_page_physload(start, end, avail_start, avail_end, free_list)
	paddr_t start, end, avail_start, avail_end;
	int free_list;
{
	int preload, lcv;
	psize_t npages;
	struct vm_page *pgs;
	struct vm_physseg *ps;

	if (uvmexp.pagesize == 0)
		panic("uvm_page_physload: page size not set!");

	if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
		panic("uvm_page_physload: bad free list %d", free_list);

	if (start >= end)
		panic("uvm_page_physload: start >= end");

	/*
	 * do we have room?
	 */
	if (vm_nphysseg == VM_PHYSSEG_MAX) {
		printf("uvm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
		    VM_PHYSSEG_MAX, (long long)start, (long long)end);
		printf("\tincrease VM_PHYSSEG_MAX\n");
		return;
	}

	/*
	 * check to see if this is a "preload" (i.e. uvm_mem_init hasn't been
	 * called yet, so malloc is not available).
	 */
	for (lcv = 0; lcv < vm_nphysseg; lcv++) {
		if (vm_physmem[lcv].pgs)
			break;
	}
	preload = (lcv == vm_nphysseg);

	/*
	 * if VM is already running, attempt to malloc() vm_page structures
	 */
	if (!preload) {
#if defined(VM_PHYSSEG_NOADD)
		panic("uvm_page_physload: tried to add RAM after vm_mem_init");
#else
		/* XXXCDC: need some sort of lockout for this case */
		paddr_t paddr;
		npages = end - start;	/* # of pages */
		pgs = (struct vm_page *)uvm_km_alloc(kernel_map,
		    sizeof(struct vm_page) * npages);
		if (pgs == NULL) {
			printf("uvm_page_physload: can not malloc vm_page "
			    "structs for segment\n");
			printf("\tignoring 0x%lx -> 0x%lx\n", start, end);
			return;
		}
		/* zero data, init phys_addr and free_list, and free pages */
		memset(pgs, 0, sizeof(struct vm_page) * npages);
		for (lcv = 0, paddr = ptoa(start);
		    lcv < npages; lcv++, paddr += PAGE_SIZE) {
			pgs[lcv].phys_addr = paddr;
			pgs[lcv].free_list = free_list;
			if (atop(paddr) >= avail_start &&
			    atop(paddr) <= avail_end)
				uvm_pagefree(&pgs[lcv]);
		}
		/* XXXCDC: incomplete: need to update uvmexp.free, what else? */
		/* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */
#endif
	} else {

		/* gcc complains if these don't get init'd */
		pgs = NULL;
		npages = 0;

	}

	/*
	 * now insert us in the proper place in vm_physmem[]
	 */

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)

	/* random: put it at the end (easy!) */
	ps = &vm_physmem[vm_nphysseg];

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)

	{
		int x;
		/* sort by address for binary search */
		for (lcv = 0; lcv < vm_nphysseg; lcv++)
			if (start < vm_physmem[lcv].start)
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg; x > lcv; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)

	{
		int x;
		/* sort by largest segment first */
		for (lcv = 0; lcv < vm_nphysseg; lcv++)
			if ((end - start) >
			    (vm_physmem[lcv].end - vm_physmem[lcv].start))
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg; x > lcv; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}

#else

	panic("uvm_page_physload: unknown physseg strategy selected!");

#endif

	ps->start = start;
	ps->end = end;
	ps->avail_start = avail_start;
	ps->avail_end = avail_end;
	if (preload) {
		ps->pgs = NULL;
	} else {
		ps->pgs = pgs;
		ps->lastpg = pgs + npages - 1;
	}
	ps->free_list = free_list;
	vm_nphysseg++;

	/*
	 * done!
	 */

	if (!preload)
		uvm_page_rehash();

	return;
}

/*
 * uvm_page_rehash: reallocate hash table based on number of free pages.
 */

void
uvm_page_rehash()
{
	int freepages, lcv, bucketcount, s, oldcount;
	struct pglist *newbuckets, *oldbuckets;
	struct vm_page *pg;
	size_t newsize, oldsize;

	/*
	 * compute number of pages that can go in the free pool
	 */

	freepages = 0;
	for (lcv = 0; lcv < vm_nphysseg; lcv++)
		freepages +=
		    (vm_physmem[lcv].avail_end - vm_physmem[lcv].avail_start);

	/*
	 * compute number of buckets needed for this number of pages
	 */

	bucketcount = 1;
	while (bucketcount < freepages)
		bucketcount = bucketcount * 2;

	/*
	 * compute the size of the current table and new table.
	 */

	oldbuckets = uvm.page_hash;
	oldcount = uvm.page_nhash;
	oldsize = round_page(sizeof(struct pglist) * oldcount);
	newsize = round_page(sizeof(struct pglist) * bucketcount);

	/*
	 * allocate the new buckets
	 */

	newbuckets = (struct pglist *) uvm_km_alloc(kernel_map, newsize);
	if (newbuckets == NULL) {
		printf("uvm_page_physrehash: WARNING: could not grow page "
		    "hash table\n");
		return;
	}
	for (lcv = 0; lcv < bucketcount; lcv++)
		TAILQ_INIT(&newbuckets[lcv]);

	/*
	 * now replace the old buckets with the new ones and rehash everything
	 */

	s = splvm();
	simple_lock(&uvm.hashlock);
	uvm.page_hash = newbuckets;
	uvm.page_nhash = bucketcount;
	uvm.page_hashmask = bucketcount - 1;	/* power of 2 */

	/* ... and rehash */
	for (lcv = 0; lcv < oldcount; lcv++) {
		while ((pg = TAILQ_FIRST(&oldbuckets[lcv])) != NULL) {
			TAILQ_REMOVE(&oldbuckets[lcv], pg, hashq);
			TAILQ_INSERT_TAIL(
			    &uvm.page_hash[uvm_pagehash(pg->uobject, pg->offset)],
			    pg, hashq);
		}
	}
	simple_unlock(&uvm.hashlock);
	splx(s);

	/*
	 * free old bucket array if it is not the boot-time table
	 */

	if (oldbuckets != &uvm_bootbucket)
		uvm_km_free(kernel_map, (vaddr_t) oldbuckets, oldsize);

	/*
	 * done
	 */
	return;
}
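
/*
 * Illustrative note (not part of the original file): because bucketcount
 * is always a power of two, uvm.page_hashmask lets uvm_pagehash() reduce
 * a hash value with a cheap AND instead of a modulo.  For example, with
 * freepages = 16384 the loop above settles on bucketcount = 16384, so
 *
 *	uvm.page_hashmask = 16383 (0x3fff)
 *	bucket = hash & uvm.page_hashmask;	(equivalent to hash % 16384)
 *
 * at the cost of a single AND per lookup.
 */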


#if 1 /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */

void uvm_page_physdump(void); /* SHUT UP GCC */

/* call from DDB */
void
uvm_page_physdump()
{
	int lcv;

	printf("rehash: physical memory config [segs=%d of %d]:\n",
	    vm_nphysseg, VM_PHYSSEG_MAX);
	for (lcv = 0; lcv < vm_nphysseg; lcv++)
		printf("0x%llx->0x%llx [0x%llx->0x%llx]\n",
		    (long long)vm_physmem[lcv].start,
		    (long long)vm_physmem[lcv].end,
		    (long long)vm_physmem[lcv].avail_start,
		    (long long)vm_physmem[lcv].avail_end);
	printf("STRATEGY = ");
	switch (VM_PHYSSEG_STRAT) {
	case VM_PSTRAT_RANDOM: printf("RANDOM\n"); break;
	case VM_PSTRAT_BSEARCH: printf("BSEARCH\n"); break;
	case VM_PSTRAT_BIGFIRST: printf("BIGFIRST\n"); break;
	default: printf("<<UNKNOWN>>!!!!\n");
	}
	printf("number of buckets = %d\n", uvm.page_nhash);
}
#endif

/*
 * uvm_pagealloc_strat: allocate vm_page from a particular free list.
 *
 * => return null if no pages free
 * => wake up pagedaemon if number of free pages drops below low water mark
 * => if obj != NULL, obj must be locked (to put in hash)
 * => if anon != NULL, anon must be locked (to put in anon)
 * => only one of obj or anon can be non-null
 * => caller must activate/deactivate page if it is not wired.
 * => free_list is ignored if strat == UVM_PGA_STRAT_NORMAL.
 * => policy decision: it is more important to pull a page off of the
 *    appropriate priority free list than it is to get a zero'd or
 *    unknown contents page.  This is because we live with the
 *    consequences of a bad free list decision for the entire
 *    lifetime of the page, e.g. if the page comes from memory that
 *    is slower to access.
 */

struct vm_page *
uvm_pagealloc_strat(obj, off, anon, flags, strat, free_list)
	struct uvm_object *obj;
	voff_t off;
	int flags;
	struct vm_anon *anon;
	int strat, free_list;
{
	int lcv, try1, try2, s, zeroit = 0;
	struct vm_page *pg;
	struct pglist *freeq;
	struct pgfreelist *pgfl;
	boolean_t use_reserve;
	UVMHIST_FUNC("uvm_pagealloc_strat"); UVMHIST_CALLED(pghist);

	KASSERT(obj == NULL || anon == NULL);
	KASSERT(off == trunc_page(off));
	s = uvm_lock_fpageq();

	/*
	 * check to see if we need to generate some free pages by waking
	 * the pagedaemon.
	 */

#ifdef UBC
	if (uvmexp.free + uvmexp.paging < uvmexp.freemin ||
	    (uvmexp.free + uvmexp.paging < uvmexp.freetarg &&
	    uvmexp.inactive < uvmexp.inactarg)) {
		wakeup(&uvm.pagedaemon);
	}
#else
	if (uvmexp.free < uvmexp.freemin || (uvmexp.free < uvmexp.freetarg &&
	    uvmexp.inactive < uvmexp.inactarg))
		wakeup(&uvm.pagedaemon);
#endif

	/*
	 * fail if any of these conditions is true:
	 * [1]  there really are no free pages, or
	 * [2]  only kernel "reserved" pages remain and
	 *      the page isn't being allocated to a kernel object.
	 * [3]  only pagedaemon "reserved" pages remain and
	 *      the requestor isn't the pagedaemon.
	 */

	use_reserve = (flags & UVM_PGA_USERESERVE) ||
	    (obj && UVM_OBJ_IS_KERN_OBJECT(obj));
	if ((uvmexp.free <= uvmexp.reserve_kernel && !use_reserve) ||
	    (uvmexp.free <= uvmexp.reserve_pagedaemon &&
	    !(use_reserve && (curproc == uvm.pagedaemon_proc ||
	    curproc == syncerproc))))
		goto fail;

#if PGFL_NQUEUES != 2
#error uvm_pagealloc_strat needs to be updated
#endif

	/*
	 * If we want a zero'd page, try the ZEROS queue first, otherwise
	 * we try the UNKNOWN queue first.
	 */
	if (flags & UVM_PGA_ZERO) {
		try1 = PGFL_ZEROS;
		try2 = PGFL_UNKNOWN;
	} else {
		try1 = PGFL_UNKNOWN;
		try2 = PGFL_ZEROS;
	}

	UVMHIST_LOG(pghist, "obj=%p off=%lx anon=%p flags=%lx",
	    obj, (u_long)off, anon, flags);
	UVMHIST_LOG(pghist, "strat=%ld free_list=%ld", strat, free_list, 0, 0);
 again:
	switch (strat) {
	case UVM_PGA_STRAT_NORMAL:
		/* Check all freelists in descending priority order. */
		for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
			pgfl = &uvm.page_free[lcv];
			if ((pg = TAILQ_FIRST((freeq =
			    &pgfl->pgfl_queues[try1]))) != NULL ||
			    (pg = TAILQ_FIRST((freeq =
			    &pgfl->pgfl_queues[try2]))) != NULL)
				goto gotit;
		}

		/* No pages free! */
		goto fail;

	case UVM_PGA_STRAT_ONLY:
	case UVM_PGA_STRAT_FALLBACK:
		/* Attempt to allocate from the specified free list. */
		KASSERT(free_list >= 0 && free_list < VM_NFREELIST);
		pgfl = &uvm.page_free[free_list];
		if ((pg = TAILQ_FIRST((freeq =
		    &pgfl->pgfl_queues[try1]))) != NULL ||
		    (pg = TAILQ_FIRST((freeq =
		    &pgfl->pgfl_queues[try2]))) != NULL)
			goto gotit;

		/* Fall back, if possible. */
		if (strat == UVM_PGA_STRAT_FALLBACK) {
			strat = UVM_PGA_STRAT_NORMAL;
			goto again;
		}

		/* No pages free! */
		goto fail;

	default:
		panic("uvm_pagealloc_strat: bad strat %d", strat);
		/* NOTREACHED */
	}

 gotit:
	TAILQ_REMOVE(freeq, pg, pageq);
	uvmexp.free--;

	/* update zero'd page count */
	if (pg->pg_flags & PG_ZERO)
		uvmexp.zeropages--;

	/*
	 * update allocation statistics and remember if we have to
	 * zero the page
	 */
	if (flags & UVM_PGA_ZERO) {
		if (pg->pg_flags & PG_ZERO) {
			uvmexp.pga_zerohit++;
			zeroit = 0;
		} else {
			uvmexp.pga_zeromiss++;
			zeroit = 1;
		}
	}

	uvm_unlock_fpageq(s);		/* unlock free page queue */

	pg->offset = off;
	pg->uobject = obj;
	pg->uanon = anon;
	pg->pg_flags = PG_BUSY|PG_CLEAN|PG_FAKE;
	pg->pg_version++;
	if (anon) {
		anon->an_page = pg;
		atomic_setbits_int(&pg->pg_flags, PQ_ANON);
#ifdef UBC
		uvm_pgcnt_anon++;
#endif
	} else {
		if (obj)
			uvm_pageinsert(pg);
	}
#if defined(UVM_PAGE_TRKOWN)
	pg->owner_tag = NULL;
#endif
	UVM_PAGE_OWN(pg, "new alloc");

	if (flags & UVM_PGA_ZERO) {
		/*
		 * A zero'd page is not clean.  If we got a page not already
		 * zero'd, then we have to zero it ourselves.
		 */
		atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
		if (zeroit)
			pmap_zero_page(pg);
	}

	UVMHIST_LOG(pghist, "allocated pg %p/%lx", pg,
	    (u_long)VM_PAGE_TO_PHYS(pg), 0, 0);
	return(pg);

 fail:
	uvm_unlock_fpageq(s);
	UVMHIST_LOG(pghist, "failed!", 0, 0, 0, 0);
	return (NULL);
}
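
/*
 * Illustrative sketch (not part of the original file): most callers go
 * through the uvm_pagealloc() wrapper, which uses UVM_PGA_STRAT_NORMAL.
 * A hypothetical caller filling in an object page looks roughly like:
 *
 *	struct vm_page *pg;
 *
 *	simple_lock(&uobj->vmobjlock);
 *	pg = uvm_pagealloc(uobj, off, NULL, UVM_PGA_ZERO);
 *	simple_unlock(&uobj->vmobjlock);
 *	if (pg == NULL) {
 *		uvm_wait("examplepg");		(sleep for the pagedaemon)
 *		... retry ...
 *	}
 *
 * The page comes back PG_BUSY and must eventually be unbusied by its
 * owner (see uvm_page_unbusy() below).
 */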

/*
 * uvm_pagerealloc: reallocate a page from one object to another
 *
 * => both objects must be locked
 */

void
uvm_pagerealloc(pg, newobj, newoff)
	struct vm_page *pg;
	struct uvm_object *newobj;
	voff_t newoff;
{

	UVMHIST_FUNC("uvm_pagerealloc"); UVMHIST_CALLED(pghist);

	/*
	 * remove it from the old object
	 */

	if (pg->uobject) {
		uvm_pageremove(pg);
	}

	/*
	 * put it in the new object
	 */

	if (newobj) {
		pg->uobject = newobj;
		pg->offset = newoff;
		pg->pg_version++;
		uvm_pageinsert(pg);
	}
}


/*
 * uvm_pagefree: free page
 *
 * => erase page's identity (i.e. remove from hash/object)
 * => put page on free list
 * => caller must lock owning object (either anon or uvm_object)
 * => caller must lock page queues
 * => assumes all valid mappings of pg are gone
 */

void
uvm_pagefree(struct vm_page *pg)
{
	int s;
	int saved_loan_count = pg->loan_count;
	UVMHIST_FUNC("uvm_pagefree"); UVMHIST_CALLED(pghist);

#ifdef DEBUG
	if (pg->uobject == (void *)0xdeadbeef &&
	    pg->uanon == (void *)0xdeadbeef) {
		panic("uvm_pagefree: freeing free page %p", pg);
	}
#endif

	UVMHIST_LOG(pghist, "freeing pg %p/%lx", pg,
	    (u_long)VM_PAGE_TO_PHYS(pg), 0, 0);

	/*
	 * if the page was an object page (and thus "TABLED"), remove it
	 * from the object.
	 */

	if (pg->pg_flags & PG_TABLED) {

		/*
		 * if the object page is on loan we are going to drop ownership.
		 * it is possible that an anon will take over as owner for this
		 * page later on.  the anon will want a !PG_CLEAN page so that
		 * it knows it needs to allocate swap if it wants to page the
		 * page out.
		 */

		/* in case an anon takes over */
		if (saved_loan_count)
			atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
		uvm_pageremove(pg);

		/*
		 * if our page was on loan, then we just lost control over it
		 * (in fact, if it was loaned to an anon, the anon may have
		 * already taken over ownership of the page by now and thus
		 * changed the loan_count [e.g. in uvmfault_anonget()]) we just
		 * return (when the last loan is dropped, then the page can be
		 * freed by whatever was holding the last loan).
		 */

		if (saved_loan_count)
			return;
	} else if (saved_loan_count && pg->uanon) {
		/*
		 * if our page is owned by an anon and is loaned out to the
		 * kernel then we just want to drop ownership and return.
		 * the kernel must free the page when all its loans clear ...
		 * note that the kernel can't change the loan status of our
		 * page as long as we are holding PQ lock.
		 */
		atomic_clearbits_int(&pg->pg_flags, PQ_ANON);
		pg->uanon->an_page = NULL;
		pg->uanon = NULL;
		return;
	}
	KASSERT(saved_loan_count == 0);

	/*
	 * now remove the page from the queues
	 */

	if (pg->pg_flags & PQ_ACTIVE) {
		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
		uvmexp.active--;
	}
	if (pg->pg_flags & PQ_INACTIVE) {
		if (pg->pg_flags & PQ_SWAPBACKED)
			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
		uvmexp.inactive--;
	}

	/*
	 * if the page was wired, unwire it now.
	 */

	if (pg->wire_count) {
		pg->wire_count = 0;
		uvmexp.wired--;
	}
	if (pg->uanon) {
		pg->uanon->an_page = NULL;
#ifdef UBC
		uvm_pgcnt_anon--;
#endif
	}

	/*
	 * and put on free queue
	 */

	atomic_clearbits_int(&pg->pg_flags, PG_ZERO);

	s = uvm_lock_fpageq();
	TAILQ_INSERT_TAIL(&uvm.page_free[
	    uvm_page_lookup_freelist(pg)].pgfl_queues[PGFL_UNKNOWN], pg, pageq);
	atomic_clearbits_int(&pg->pg_flags, PQ_MASK);
	atomic_setbits_int(&pg->pg_flags, PQ_FREE);
#ifdef DEBUG
	pg->uobject = (void *)0xdeadbeef;
	pg->offset = 0xdeadbeef;
	pg->uanon = (void *)0xdeadbeef;
#endif
	uvmexp.free++;

	if (uvmexp.zeropages < UVM_PAGEZERO_TARGET)
		uvm.page_idle_zero = vm_page_zero_enable;

	uvm_unlock_fpageq(s);
}
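
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * removes all mappings first, then frees under the locks required above,
 * e.g. during a hypothetical object teardown:
 *
 *	simple_lock(&uobj->vmobjlock);
 *	uvm_lock_pageq();
 *	pmap_page_protect(pg, VM_PROT_NONE);	(drop all mappings)
 *	uvm_pagefree(pg);
 *	uvm_unlock_pageq();
 *	simple_unlock(&uobj->vmobjlock);
 */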

/*
 * uvm_page_unbusy: unbusy an array of pages.
 *
 * => pages must either all belong to the same object, or all belong to anons.
 * => if pages are object-owned, object must be locked.
 * => if pages are anon-owned, anons must be unlocked and have 0 refcount.
 */

void
uvm_page_unbusy(pgs, npgs)
	struct vm_page **pgs;
	int npgs;
{
	struct vm_page *pg;
	struct uvm_object *uobj;
	int i;
	UVMHIST_FUNC("uvm_page_unbusy"); UVMHIST_CALLED(pdhist);

	for (i = 0; i < npgs; i++) {
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		if (pg->pg_flags & PG_WANTED) {
			wakeup(pg);
		}
		if (pg->pg_flags & PG_RELEASED) {
			UVMHIST_LOG(pdhist, "releasing pg %p", pg,0,0,0);
			uobj = pg->uobject;
			if (uobj != NULL) {
				uobj->pgops->pgo_releasepg(pg, NULL);
			} else {
				atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
				UVM_PAGE_OWN(pg, NULL);
				uvm_anfree(pg->uanon);
			}
		} else {
			UVMHIST_LOG(pdhist, "unbusying pg %p", pg,0,0,0);
			atomic_clearbits_int(&pg->pg_flags, PG_WANTED|PG_BUSY);
			UVM_PAGE_OWN(pg, NULL);
		}
	}
}

#if defined(UVM_PAGE_TRKOWN)
/*
 * uvm_page_own: set or release page ownership
 *
 * => this is a debugging function that keeps track of who sets PG_BUSY
 *    and where they do it.  it can be used to track down problems
 *    such as a process setting "PG_BUSY" and never releasing it.
 * => page's object [if any] must be locked
 * => if "tag" is NULL then we are releasing page ownership
 */
void
uvm_page_own(pg, tag)
	struct vm_page *pg;
	char *tag;
{
	/* gain ownership? */
	if (tag) {
		if (pg->owner_tag) {
			printf("uvm_page_own: page %p already owned "
			    "by proc %d [%s]\n", pg,
			    pg->owner, pg->owner_tag);
			panic("uvm_page_own");
		}
		pg->owner = (curproc) ? curproc->p_pid : (pid_t) -1;
		pg->owner_tag = tag;
		return;
	}

	/* drop ownership */
	if (pg->owner_tag == NULL) {
! 1318: printf("uvm_page_own: dropping ownership of an non-owned "
! 1319: "page (%p)\n", pg);
! 1320: panic("uvm_page_own");
! 1321: }
! 1322: pg->owner_tag = NULL;
! 1323: return;
! 1324: }
! 1325: #endif
! 1326:
! 1327: /*
! 1328: * uvm_pageidlezero: zero free pages while the system is idle.
! 1329: *
! 1330: * => we do at least one iteration per call, if we are below the target.
! 1331: * => we loop until we either reach the target or whichqs indicates that
! 1332: * there is a process ready to run.
! 1333: */
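
/*
 * Illustrative sketch (not part of the original file): the MD idle loop
 * is expected to poll the uvm.page_idle_zero flag and call in here when
 * there is nothing else to do, roughly (cpu_sleep is a hypothetical MD
 * sleep primitive):
 *
 *	while (sched_is_idle()) {
 *		if (uvm.page_idle_zero)
 *			uvm_pageidlezero();
 *		else
 *			cpu_sleep();
 *	}
 */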
void
uvm_pageidlezero()
{
	struct vm_page *pg;
	struct pgfreelist *pgfl;
	int free_list, s;
	UVMHIST_FUNC("uvm_pageidlezero"); UVMHIST_CALLED(pghist);

	do {
		s = uvm_lock_fpageq();

		if (uvmexp.zeropages >= UVM_PAGEZERO_TARGET) {
			uvm.page_idle_zero = FALSE;
			uvm_unlock_fpageq(s);
			return;
		}

		for (free_list = 0; free_list < VM_NFREELIST; free_list++) {
			pgfl = &uvm.page_free[free_list];
			if ((pg = TAILQ_FIRST(&pgfl->pgfl_queues[
			    PGFL_UNKNOWN])) != NULL)
				break;
		}

		if (pg == NULL) {
			/*
			 * No non-zero'd pages; don't bother trying again
			 * until we know we have non-zero'd pages free.
			 */
			uvm.page_idle_zero = FALSE;
			uvm_unlock_fpageq(s);
			return;
		}

		TAILQ_REMOVE(&pgfl->pgfl_queues[PGFL_UNKNOWN], pg, pageq);
		uvmexp.free--;
		uvm_unlock_fpageq(s);

#ifdef PMAP_PAGEIDLEZERO
		if (PMAP_PAGEIDLEZERO(pg) == FALSE) {
			/*
			 * The machine-dependent code detected some
			 * reason for us to abort zeroing pages,
			 * probably because there is a process now
			 * ready to run.
			 */
			s = uvm_lock_fpageq();
			TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_UNKNOWN],
			    pg, pageq);
			uvmexp.free++;
			uvmexp.zeroaborts++;
			uvm_unlock_fpageq(s);
			return;
		}
#else
		/*
		 * XXX This will toast the cache unless the pmap_zero_page()
		 * XXX implementation does uncached access.
		 */
		pmap_zero_page(pg);
#endif
		atomic_setbits_int(&pg->pg_flags, PG_ZERO);

		s = uvm_lock_fpageq();
		TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_ZEROS], pg, pageq);
		uvmexp.free++;
		uvmexp.zeropages++;
		uvm_unlock_fpageq(s);
	} while (sched_is_idle());
}