4.3BSD-UWisc/src/sys/sys/vm_subr.c
/*
* Copyright (c) 1982, 1986 Regents of the University of California.
* All rights reserved. The Berkeley software License Agreement
* specifies the terms and conditions for redistribution.
*
* @(#)vm_subr.c 7.1 (Berkeley) 6/5/86
*/
#ifndef lint
static char rcs_id[] = {"$Header: vm_subr.c,v 3.1 86/10/22 13:48:50 tadl Exp $"};
#endif /* not lint */
/*
* RCS Info
* $Locker: $
*/
#include "../machine/pte.h"
#include "param.h"
#include "systm.h"
#include "user.h"
#include "vm.h"
#include "proc.h"
#include "cmap.h"
#include "buf.h"
#include "text.h"
#include "vfs.h"
#include "vnode.h"
#ifdef vax
#include "../vax/mtpr.h"
#endif

/*
 * Make uarea of process p addressable at kernel virtual
 * address uarea through sysmap locations starting at map.
 */
uaccess(p, map, uarea)
	register struct proc *p;
	struct pte *map;
	register struct user *uarea;
{
	register int i;
	register struct pte *mp = map;
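
	/*
	 * For each of the UPAGES pages of the u-area, clear the
	 * destination sysmap pte and copy in the physical page frame
	 * number recorded in the proc structure; the mappings are
	 * then validated below by vmaccess().
	 */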
	for (i = 0; i < UPAGES; i++) {
		*(int *)mp = 0;
		mp->pg_pfnum = p->p_addr[i].pg_pfnum;
		mp++;
	}
	vmaccess(map, (caddr_t)uarea, UPAGES);
}
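
#ifdef notdef
/*
 * Usage sketch (editor's illustration, not part of the original
 * source): map the u-area of another process at a fixed kernel
 * address so its fields can be inspected without resuming that
 * process.  "examplemap" and "exampleutl" stand in for one of the
 * sysmap pte windows and the kernel virtual address it maps (set up
 * in machine-dependent code); both names are hypothetical.
 */
extern struct pte examplemap[UPAGES];
extern struct user exampleutl;

example_peek_uarea(p)
	register struct proc *p;
{

	uaccess(p, examplemap, &exampleutl);
	return (exampleutl.u_procp == p);
}
#endif /* notdef */
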
/*
* Validate the kernel map for size ptes which
* start at ppte in the sysmap, and which map
* kernel virtual addresses starting with vaddr.
*/
vmaccess(ppte0, vaddr, size0)
	struct pte *ppte0;
	register caddr_t vaddr;
	int size0;
{
	register struct pte *ppte = ppte0;
	register int size = size0;
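
	/*
	 * Enter each page into the kernel map one at a time, using the
	 * page frame number already present in the pte and marking the
	 * entry valid and kernel read/write.
	 */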
	while (size != 0) {
		mapin(ppte, btop(vaddr), (unsigned)(*(int *)ppte & PG_PFNUM), 1,
		    (int)(PG_V|PG_KW));
		ppte++;
		vaddr += NBPG;
		--size;
	}
}

/*
* Convert a pte pointer to
* a virtual page number.
*/
ptetov(p, pte)
	register struct proc *p;
	struct pte *pte;
{
	register int j;

	j = pte - p->p_p0br;
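	/*
	 * Ptes for the text and data segments sit at the front of the
	 * page table, so for them the index is the virtual page number
	 * directly; any other pte lies in the stack/u-area portion at
	 * the end of the page table, and its index is rebased against
	 * the top of the user stack.
	 */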
	if (j < p->p_tsize + p->p_dsize)
		return (j);
	return ((BTOPUSRSTACK + UPAGES) - p->p_szpt * NPTEPG + j);
}

#ifdef notdef
/*
* Convert a virtual page
* number to a pte address.
*/
struct pte *
vtopte(p, v)
	register struct proc *p;
	int v;
{

	if (v < p->p_tsize + p->p_dsize)
		return (p->p_p0br + v);
	return (p->p_p0br + (p->p_szpt * NPTEPG + v - (BTOPUSRSTACK + UPAGES)));
}
#endif /* notdef */

/*
* Initialize the page tables for paging from an inode,
* by scouring up the indirect blocks in order.
* Corresponding area of memory should have been vmemfree()d
* first or just created.
*/
vinifod(pte, fileno, vp, bfirst, count)
	register struct fpte *pte;
	int fileno;
	register struct vnode *vp;
	daddr_t bfirst;
	size_t count;
{
	int blast = bfirst + howmany(count, CLSIZE);
	register int i, j;
	int bn;
	int nclpbsize = vp->v_vfsp->vfs_bsize / CLBYTES;
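
	/*
	 * For each page cluster in the range, ask the file system
	 * (VOP_BMAP) for the device block underlying the file-system
	 * block that holds it.  If the lookup failed or no device
	 * block is assigned (bn < 0), the cluster is marked
	 * zero-fill-on-demand; otherwise it is marked fill-from-file
	 * at the corresponding device block offset.
	 */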
	while (bfirst < blast) {
		i = bfirst % nclpbsize;
		(void) VOP_BMAP(vp, bfirst / nclpbsize, (struct vnode *)0, &bn);
		for ( ; i < nclpbsize; i++) {
			pte->pg_fod = 1;
			pte->pg_fileno = fileno;
			if (u.u_error || bn < 0) {
				pte->pg_blkno = 0;
				pte->pg_fileno = PG_FZERO;
				cnt.v_nzfod += CLSIZE;
			} else {
				pte->pg_blkno = bn + btodb(i * CLBYTES);
				cnt.v_nexfod += CLSIZE;
			}
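			/* Replicate the pte across the rest of the cluster. */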
			for (j = 1; j < CLSIZE; j++)
				pte[j] = pte[0];
			pte += CLSIZE;
			bfirst++;
			if (bfirst == blast)
				break;
		}
	}
}