static ushort *crt = (ushort*)0xb8000; // CGA memory
-static struct spinlock console_lock;
-int panicked = 0;
-volatile int use_console_lock = 0;
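+// The console lock and the flag saying whether to use it;
+// locking stays off until consoleinit runs and is turned off again on panic.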
+static struct {
+ struct spinlock lock;
+ int locking;
+} cons;
+
+static int panicked = 0;
static void
cgaputc(int c)
uint *argp;
char *s;
- locking = use_console_lock;
+ locking = cons.locking;
if(locking)
- acquire(&console_lock);
+ acquire(&cons.lock);
argp = (uint*)(void*)&fmt + 1;
state = 0;
}
if(locking)
- release(&console_lock);
+ release(&cons.lock);
}
int
int i;
iunlock(ip);
- acquire(&console_lock);
+ acquire(&cons.lock);
for(i = 0; i < n; i++)
consputc(buf[i] & 0xff);
- release(&console_lock);
+ release(&cons.lock);
ilock(ip);
return n;
void
consoleinit(void)
{
- initlock(&console_lock, "console");
- initlock(&input.lock, "console input");
+ initlock(&cons.lock, "console");
+ initlock(&input.lock, "input");
devsw[CONSOLE].write = consolewrite;
devsw[CONSOLE].read = consoleread;
- use_console_lock = 1;
+ cons.locking = 1;
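+ // Enable keyboard interrupts on the PIC and route them via the IO APIC to CPU 0.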
picenable(IRQ_KBD);
ioapicenable(IRQ_KBD, 0);
uint pcs[10];
cli();
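+ // Stop taking cons.lock so the panic output below cannot block
+ // on a lock held by another CPU.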
- use_console_lock = 0;
+ cons.locking = 0;
cprintf("cpu%d: panic: ", cpu());
cprintf(s);
cprintf("\n");
#include "param.h"
#include "spinlock.h"
-struct spinlock kalloc_lock;
-
struct run {
struct run *next;
int len; // bytes
};
-struct run *freelist;
+
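+// Free physical memory, kept as an address-ordered list of runs;
+// kmem.lock protects the list.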
+struct {
+ struct spinlock lock;
+ struct run *freelist;
+} kmem;
// Initialize free list of physical pages.
// This code cheats by just considering one megabyte of
uint mem;
char *start;
- initlock(&kalloc_lock, "kalloc");
+ initlock(&kmem.lock, "kmem");
start = (char*) &end;
start = (char*) (((uint)start + PAGE) & ~(PAGE-1));
mem = 256; // assume computer has 256 pages of RAM
// Fill with junk to catch dangling refs.
memset(v, 1, len);
- acquire(&kalloc_lock);
+ acquire(&kmem.lock);
p = (struct run*)v;
pend = (struct run*)(v + len);
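+ // The free list is kept sorted by address: scan to the insertion
+ // point, and panic if the range being freed is already free.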
- for(rp=&freelist; (r=*rp) != 0 && r <= pend; rp=&r->next){
+ for(rp=&kmem.freelist; (r=*rp) != 0 && r <= pend; rp=&r->next){
rend = (struct run*)((char*)r + r->len);
if(r <= p && p < rend)
panic("freeing free page");
*rp = p;
out:
- release(&kalloc_lock);
+ release(&kmem.lock);
}
// Allocate n bytes of physical memory.
if(n % PAGE || n <= 0)
panic("kalloc");
- acquire(&kalloc_lock);
- for(rp=&freelist; (r=*rp) != 0; rp=&r->next){
+ acquire(&kmem.lock);
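+ // First-fit scan: take a run of exactly n bytes, or carve n bytes
+ // off the tail of a larger run.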
+ for(rp=&kmem.freelist; (r=*rp) != 0; rp=&r->next){
if(r->len == n){
*rp = r->next;
- release(&kalloc_lock);
+ release(&kmem.lock);
return (char*)r;
}
if(r->len > n){
r->len -= n;
p = (char*)r + r->len;
- release(&kalloc_lock);
+ release(&kmem.lock);
return p;
}
}
- release(&kalloc_lock);
+ release(&kmem.lock);
cprintf("kalloc: out of memory\n");
return 0;
#include "proc.h"
#include "spinlock.h"
-struct spinlock proc_table_lock;
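+// The process table and the lock that protects it.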
+struct {
+ struct spinlock lock;
+ struct proc proc[NPROC];
+} ptable;
-struct proc proc[NPROC];
static struct proc *initproc;
int nextpid = 1;
void
pinit(void)
{
- initlock(&proc_table_lock, "proc_table");
+ initlock(&ptable.lock, "ptable");
}
// Look in the process table for an UNUSED proc.
- int i;
struct proc *p;
- acquire(&proc_table_lock);
- for(i = 0; i < NPROC; i++){
- p = &proc[i];
+ acquire(&ptable.lock);
+ for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
if(p->state == UNUSED){
p->state = EMBRYO;
p->pid = nextpid++;
goto found;
}
}
- release(&proc_table_lock);
+ release(&ptable.lock);
return 0;
found:
- release(&proc_table_lock);
+ release(&ptable.lock);
// Allocate kernel stack if necessary.
if((p->kstack = kalloc(KSTACKSIZE)) == 0){
sti();
// Loop over process table looking for process to run.
- acquire(&proc_table_lock);
- for(i = 0; i < NPROC; i++){
- p = &proc[i];
+ acquire(&ptable.lock);
+ for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
if(p->state != RUNNABLE)
continue;
// Switch to chosen process. It is the process's job
- // to release proc_table_lock and then reacquire it
+ // to release ptable.lock and then reacquire it
// before jumping back to us.
cp = p;
usegment();
cp = 0;
usegment();
}
- release(&proc_table_lock);
+ release(&ptable.lock);
}
}
-// Enter scheduler. Must already hold proc_table_lock
+// Enter scheduler. Must already hold ptable.lock
// and have changed cp->state.
void
sched(void)
panic("sched interruptible");
if(cp->state == RUNNING)
panic("sched running");
- if(!holding(&proc_table_lock))
- panic("sched proc_table_lock");
+ if(!holding(&ptable.lock))
+ panic("sched ptable.lock");
if(c->ncli != 1)
panic("sched locks");
void
yield(void)
{
- acquire(&proc_table_lock);
+ acquire(&ptable.lock);
cp->state = RUNNABLE;
sched();
- release(&proc_table_lock);
+ release(&ptable.lock);
}
// A fork child's very first scheduling by scheduler()
void
forkret(void)
{
- // Still holding proc_table_lock from scheduler.
- release(&proc_table_lock);
+ // Still holding ptable.lock from scheduler.
+ release(&ptable.lock);
// Jump into assembly, never to return.
forkret1(cp->tf);
if(lk == 0)
panic("sleep without lk");
- // Must acquire proc_table_lock in order to
+ // Must acquire ptable.lock in order to
// change p->state and then call sched.
- // Once we hold proc_table_lock, we can be
+ // Once we hold ptable.lock, we can be
// guaranteed that we won't miss any wakeup
- // (wakeup runs with proc_table_lock locked),
+ // (wakeup runs with ptable.lock locked),
// so it's okay to release lk.
- if(lk != &proc_table_lock){
- acquire(&proc_table_lock);
+ if(lk != &ptable.lock){
+ acquire(&ptable.lock);
release(lk);
}
cp->chan = 0;
// Reacquire original lock.
- if(lk != &proc_table_lock){
- release(&proc_table_lock);
+ if(lk != &ptable.lock){
+ release(&ptable.lock);
acquire(lk);
}
}
//PAGEBREAK!
// Wake up all processes sleeping on chan.
-// Proc_table_lock must be held.
+// The ptable lock must be held.
static void
wakeup1(void *chan)
{
void
wakeup(void *chan)
{
- acquire(&proc_table_lock);
+ acquire(&ptable.lock);
wakeup1(chan);
- release(&proc_table_lock);
+ release(&ptable.lock);
}
// Kill the process with the given pid.
{
struct proc *p;
- acquire(&proc_table_lock);
- for(p = proc; p < &proc[NPROC]; p++){
+ acquire(&ptable.lock);
+ for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
if(p->pid == pid){
p->killed = 1;
// Wake process from sleep if necessary.
if(p->state == SLEEPING)
p->state = RUNNABLE;
- release(&proc_table_lock);
+ release(&ptable.lock);
return 0;
}
}
- release(&proc_table_lock);
+ release(&ptable.lock);
return -1;
}
iput(cp->cwd);
cp->cwd = 0;
- acquire(&proc_table_lock);
+ acquire(&ptable.lock);
// Parent might be sleeping in wait().
wakeup1(cp->parent);
// Pass abandoned children to init.
- for(p = proc; p < &proc[NPROC]; p++){
+ for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
if(p->parent == cp){
p->parent = initproc;
if(p->state == ZOMBIE)
struct proc *p;
- int i, havekids, pid;
+ int havekids, pid;
- acquire(&proc_table_lock);
+ acquire(&ptable.lock);
for(;;){
// Scan through table looking for zombie children.
havekids = 0;
- for(i = 0; i < NPROC; i++){
- p = &proc[i];
+ for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
if(p->state == UNUSED)
continue;
if(p->parent == cp){
p->pid = 0;
p->parent = 0;
p->name[0] = 0;
- release(&proc_table_lock);
+ release(&ptable.lock);
return pid;
}
}
// No point waiting if we don't have any children.
if(!havekids || cp->killed){
- release(&proc_table_lock);
+ release(&ptable.lock);
return -1;
}
// Wait for children to exit. (See wakeup1 call in proc_exit.)
- sleep(cp, &proc_table_lock);
+ sleep(cp, &ptable.lock);
}
}
char *state;
uint pc[10];
- for(i = 0; i < NPROC; i++){
- p = &proc[i];
+ for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
if(p->state == UNUSED)
continue;
if(p->state >= 0 && p->state < NELEM(states) && states[p->state])