AND w/ cprintf("kbd overflow"), panic holding locks in scheduler
maybe also simultaneous panic("interrupt while holding a lock")
+
+again (holding down x key):
+ kbd overflow
+ kbd oaaniicloowh
+ olding locks in scheduler
+ trap v 33 eip 100F5F c^CNext at t=32166285
+ (0) [0x0010033e] 0008:0010033e (unk. ctxt): jmp .+0xfffffffe (0x0010033e) ; ebfe
+ (1) [0x0010005c] 0008:0010005c (unk. ctxt): jmp .+0xfffffffe (0x0010005c) ; ebfe
+cpu0 panicked due to holding locks in scheduler
+cpu1 got panic("interrupt while holding a lock")
+ again in lapic_write.
+ while re-enabling an IRQ?
+
+again:
+cpu 0 panic("holding locks in scheduler")
+ but didn't trigger related panics earlier in scheduler or sched()
+ of course the panic is right after release() and thus sti()
+ so we may be seeing an interrupt that left locks held (sketch below)
+cpu 1 unknown panic
+why does it happen to both cpus at the same time?
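+
+ roughly, the window looks like this (a sketch, not the real code; the
+ actual scheduler()/release() are in the proc.c and spinlock.c hunks below):
+   release(&proc_table_lock);   // nlock drops to 0, so release() does sti()
+   // an interrupt taken right here can run a handler that leaks a lock;
+   // nothing notices until the very next check:
+   if(cpus[cpu()].nlock != 0)
+     panic("holding locks in scheduler");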
+
+again:
+cpu 0 panic("holding locks in scheduler")
+ but trap() didn't see any held locks on return
+cpu 1 no apparent panic
+
+again:
+cpu 0 panic: holding too many locks in scheduler
+cpu 1 panic: kbd_intr returned while holding a lock
+
+again:
+cpu 0 panic: holding too man
+ la 10d70c lr 10027b
+ those don't seem to be locks...
+ only place non-constant lock is used is sleep()'s 2nd arg
+ maybe register not preserved across context switch?
+ it's in %esi...
+ sched() doesn't touch %esi
+ %esi is evidently callee-saved (see the jmpbuf sketch below)
+ something to do with interrupts? since ordinarily it works
+cpu 1 panic: kbd_intr returned while holding a lock
+ la 107340 lr 107300
+ console_lock and kbd_lock
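+
+ for reference: i386 cdecl makes %ebx, %esi, %edi, %ebp callee-saved, so a
+ setjmp/longjmp-style switch has to carry them across; a sketch of the state
+ that must survive (illustrative field names, not necessarily the real
+ struct jmpbuf):
+   struct jmpbuf {
+     uint jb_ebx;
+     uint jb_esi;   // sleep()'s spinlock pointer is sitting here
+     uint jb_edi;
+     uint jb_ebp;
+     uint jb_esp;
+     uint jb_eip;
+   };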
+
+maybe console_lock is often not released due to change
+ in use_console_lock (panic on other cpu)
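+ the cprintf change below latches the decision in a local so acquire and
+ release stay paired even if the flag flips mid-print; roughly:
+   locking = use_console_lock;   // decide once, up front
+   if(locking)
+     acquire(&console_lock);
+   ...                           // do the printing
+   if(locking)                   // not use_console_lock: a panic on the
+     release(&console_lock);     // other cpu may have changed it meanwhile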
+
+again:
+cpu 0: panic: h...
+ la 10D78C lr 102CA0
+cpu 1: panic: acquire FL_IF (later than cpu 0)
+
+but if sleep() were acquiring random locks, we'd see panics
+in release, after sleep() returned.
+actually when system is idle, maybe no-one sleeps at all.
+ just scheduler() and interrupts
+
+questions:
+ does userfs use pipes? or fork?
+ no
+ does anything bad happen if process 1 exits? eg exit() in cat.c
+ looks ok
+ are there really no processes left?
+ lock_init() so we can have a magic number?
+
+HMM maybe the variables at the end of struct cpu are being overwritten
+ nlocks, lastacquire, lastrelease
+ by cpu->stack? (layout sketch below)
+ adding junk buffers maybe causes crash to take longer...
+ when do we run on cpu stack?
+ just in scheduler()?
+ and interrupts from scheduler()
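+
+ the layout in question, with the guard words this change adds (a sketch of
+ the relevant fields of struct cpu in proc.h; the stack grows downward from
+ mpstack + MPSTACK):
+   struct cpu {
+     uchar apicid;
+     struct jmpbuf jmpbuf;
+     int guard1;                    // hit first if this cpu's stack underflows
+     char mpstack[MPSTACK];
+     int guard2;
+     volatile int booted;
+     int nlock;                     // these trailing fields get clobbered
+     struct spinlock *lastacquire;  // when the *next* cpu's stack underflows
+     struct spinlock *lastrelease;  // far enough to spill below its struct
+   };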
+
+OH! recursive interrupts will use up any amount of cpu[].stack!
+ it underflows and wrecks the *previous* cpu's struct
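+
+ with a key held down, the nesting looks roughly like this (call sketch, one
+ trapframe per IRQ, all on the same per-cpu stack):
+   trap()                     // frame 1
+     kbd_intr()
+       release(&kbd_lock)     // nlock hits 0, so release() does sti()
+       lapic_eoi()            // old code: eoi inside the handler, IF still set
+   trap()                     // frame 2 arrives before frame 1 ever iret's
+     ...                      // and so on until the stack runs off the end
+ hence the trap.c change at the bottom: lapic_eoi() moves out of the
+ handlers and is preceded by cli(), so interrupts stay masked until iret
+ and frames can't pile up.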
#include "buf.h"
struct buf buf[NBUF];
-struct spinlock buf_table_lock = { "buf_table" };
+struct spinlock buf_table_lock;
+
+void
+binit(void)
+{
+ initlock(&buf_table_lock, "buf_table");
+}
struct buf *
getblk()
#include "defs.h"
#include "spinlock.h"
#include "dev.h"
+#include "param.h"
-struct spinlock console_lock = { "console" };
+struct spinlock console_lock;
int panicked = 0;
int use_console_lock = 0;
+// per-cpu copy of output to help panic/lock debugging
+char obuf[NCPU][1024];
+uint obufi[NCPU];
+
/*
* copy console output to parallel port, which you can tell
* .bochsrc to copy to the stdout:
ushort *crt = (ushort *) 0xB8000; // base of CGA memory
int ind;
+ obuf[rcr4()][obufi[rcr4()]++] = c;
+ if(obufi[rcr4()] >= 1024)
+ obufi[rcr4()] = 0;
+
if(panicked){
cli();
for(;;)
void
cprintf(char *fmt, ...)
{
- int i, state = 0, c;
+ int i, state = 0, c, locking = 0;
uint *ap = (uint *)(void*)&fmt + 1;
- if(use_console_lock)
+ if(use_console_lock){
+ locking = 1;
acquire(&console_lock);
+ }
for(i = 0; fmt[i]; i++){
c = fmt[i] & 0xff;
}
}
- if(use_console_lock)
+ if(locking)
release(&console_lock);
}
char kbd_buf[KBD_BUF];
int kbd_r;
int kbd_w;
-struct spinlock kbd_lock = { "kbd_lock" };
+struct spinlock kbd_lock;
void
kbd_intr()
st = inb(KBSTATP);
if ((st & KBS_DIB) == 0){
- lapic_eoi();
return;
}
data = inb(KBDATAP);
if (data == 0xE0) {
shift |= E0ESC;
- lapic_eoi();
return;
} else if (data & 0x80) {
// Key released
data = (shift & E0ESC ? data : data & 0x7F);
shift &= ~(shiftcode[data] | E0ESC);
- lapic_eoi();
return;
} else if (shift & E0ESC) {
// Last character was an E0 escape; or with 0x80
}
release(&kbd_lock);
-
- lapic_eoi();
}
void
console_init()
{
+ initlock(&console_lock, "console");
+ initlock(&kbd_lock, "kbd");
+
devsw[CONSOLE].d_write = console_write;
ioapic_enable (IRQ_KBD, 1);
+
+ use_console_lock = 1;
}
void kbd_intr(void);
// proc.c
+void pinit(void);
struct proc;
struct jmpbuf;
void setupsegs(struct proc *);
// spinlock.c
struct spinlock;
+void initlock(struct spinlock *, char *);
void acquire(struct spinlock*);
void release(struct spinlock*);
int holding(struct spinlock*);
int pipe_read(struct pipe *p, char *addr, int n);
// fd.c
+void fd_init(void);
int fd_ualloc(void);
struct fd * fd_alloc(void);
void fd_close(struct fd *);
int ide_finish(void *);
// bio.c
+void binit(void);
struct buf;
struct buf *getblk(void);
struct buf *bread(uint, uint);
void brelse(struct buf *);
// fs.c
+void iinit(void);
struct inode * iget(uint dev, uint inum);
void ilock(struct inode *ip);
void iunlock(struct inode *ip);
struct fd fds[NFD];
+void
+fd_init(void)
+{
+ initlock(&fd_table_lock, "fd_table");
+}
+
/*
* allocate a file descriptor number for curproc.
*/
// these are inodes currently in use
// an entry is free if count == 0
struct inode inode[NINODE];
-struct spinlock inode_table_lock = { "inode_table" };
+struct spinlock inode_table_lock;
uint rootdev = 1;
+void
+iinit(void)
+{
+ initlock(&inode_table_lock, "inode_table");
+}
+
static uint
balloc(uint dev)
{
};
struct ide_request request[NREQUEST];
int head, tail;
-struct spinlock ide_lock = { "ide" };
+struct spinlock ide_lock;
int disk_channel;
void
ide_init(void)
{
+ initlock(&ide_lock, "ide");
if (ncpu < 2) {
panic ("ide_init: disk interrupt is going to the second cpu\n");
}
// cprintf("cpu%d: ide_intr\n", cpu());
wakeup(&request[tail]);
release(&ide_lock);
- lapic_eoi();
}
int
#include "proc.h"
#include "spinlock.h"
-struct spinlock kalloc_lock = { "kalloc" };
+struct spinlock kalloc_lock;
struct run {
struct run *next;
uint mem;
char *start;
+ initlock(&kalloc_lock, "kalloc");
start = (char *) &end;
start = (char *) (((uint)start + PAGE) & ~(PAGE-1));
mem = 256; // XXX
extern uchar _binary_usertests_start[], _binary_usertests_size[];
extern uchar _binary_userfs_start[], _binary_userfs_size[];
-extern int use_console_lock;
-
// CPU 0 starts running C code here.
// This is called main0 not main so that it can have
// a void return type. Gcc can't handle functions named
int i;
struct proc *p;
+ lcr4(0); // xxx copy of cpu #
+
// clear BSS
memset(edata, 0, end - edata);
// Make sure interrupts stay disabled on all processors
// until each signals it is ready, by pretending to hold
// an extra lock.
- for(i=0; i<NCPU; i++)
+ // xxx maybe replace w/ acquire remembering if FL_IF
+ for(i=0; i<NCPU; i++){
cpus[i].nlock++;
+ cpus[i].guard1 = 0xdeadbeef;
+ cpus[i].guard2 = 0xdeadbeef;
+ }
mp_init(); // collect info about this machine
- use_console_lock = 1;
-
lapic_init(mp_bcpu());
cprintf("\n\ncpu%d: booting xv6\n\n", cpu());
+ pinit();
+ binit();
pic_init(); // initialize PIC
ioapic_init();
kinit(); // physical memory allocator
tvinit(); // trap vectors
idtinit(); // this CPU's idt register
+ fd_init();
+ iinit();
// create a fake process per CPU
// so each CPU always has a tss and a gdt
void
mpmain(void)
{
+ lcr4(1); // xxx copy of cpu #
+
cprintf("cpu%d: starting\n", cpu());
idtinit(); // CPU's idt
if(cpu() == 0)
// Set up a normal interrupt/trap gate descriptor.
// - istrap: 1 for a trap (= exception) gate, 0 for an interrupt gate.
+// interrupt gate clears FL_IF, trap gate leaves FL_IF alone
// - sel: Code segment selector for interrupt/trap handler
// - off: Offset in code segment for interrupt/trap handler
// - dpl: Descriptor Privilege Level -
p->writeopen = 1;
p->writep = 0;
p->readp = 0;
- memset(&p->lock, 0, sizeof(p->lock));
+ initlock(&p->lock, "pipe");
(*fd1)->type = FD_PIPE;
(*fd1)->readable = 1;
(*fd1)->writeable = 0;
#include "defs.h"
#include "spinlock.h"
-struct spinlock proc_table_lock = { "proc_table" };
+struct spinlock proc_table_lock;
struct proc proc[NPROC];
struct proc *curproc[NCPU];
extern void forkret(void);
extern void forkret1(struct trapframe*);
+void
+pinit(void)
+{
+ initlock(&proc_table_lock, "proc_table");
+}
+
/*
* set up a process's task state and segment descriptors
* correctly, given its current size and address in memory.
// Loop over process table looking for process to run.
acquire(&proc_table_lock);
for(i = 0; i < NPROC; i++){
+ if(cpus[cpu()].guard1 != 0xdeadbeef ||
+ cpus[cpu()].guard2 != 0xdeadbeef)
+ panic("cpu guard");
p = &proc[i];
if(p->state != RUNNABLE)
continue;
// XXX if not holding proc_table_lock panic.
}
+
release(&proc_table_lock);
if(cpus[cpu()].nlock != 0)
void
sched(void)
{
- if(setjmp(&curproc[cpu()]->jmpbuf) == 0)
+ struct proc *p = curproc[cpu()];
+
+ if(setjmp(&p->jmpbuf) == 0)
longjmp(&cpus[cpu()].jmpbuf);
}
uint sz; // total size of mem, including kernel stack
char *kstack; // kernel stack, separate from mem so it doesn't move
enum proc_state state;
- enum proc_state newstate; // desired state after swtch()
- struct spinlock *mtx; // mutex for condition variable
int pid;
int ppid;
void *chan; // sleep
struct cpu {
uchar apicid; // Local APIC ID
struct jmpbuf jmpbuf;
+ int guard1;
char mpstack[MPSTACK]; // per-cpu start-up stack
+ int guard2;
volatile int booted;
int nlock; // # of locks currently held
struct spinlock *lastacquire; // xxx debug
// because cprintf uses them itself.
//#define cprintf dont_use_cprintf
+#define LOCKMAGIC 0x6673ffea
+
extern int use_console_lock;
+void
+initlock(struct spinlock *lock, char *name)
+{
+ lock->magic = LOCKMAGIC;
+ lock->name = name;
+ lock->locked = 0;
+ lock->cpu = 0xffffffff;
+}
+
void
getcallerpcs(void *v, uint pcs[])
{
void
acquire(struct spinlock * lock)
{
+ if(lock->magic != LOCKMAGIC)
+ panic("weird lock magic");
if(holding(lock))
panic("acquire");
void
release(struct spinlock * lock)
{
+ if(lock->magic != LOCKMAGIC)
+ panic("weird lock magic");
+
if(!holding(lock))
panic("release");
lock->locked = 0;
if(--cpus[cpu()].nlock == 0)
sti();
- // xxx we may have just turned interrupts on during
- // an interrupt, is that ok?
}
int
struct spinlock {
+ uint magic;
char *name;
uint locked;
- uint pcs[10];
int cpu;
+ uint pcs[10];
};
panic("interrupt while holding a lock");
}
+ if(cpu() == 1 && curproc[cpu()] == 0){
+ if(&tf < cpus[cpu()].mpstack || &tf > cpus[cpu()].mpstack + 512){
+ cprintf("&tf %x mpstack %x\n", &tf, cpus[cpu()].mpstack);
+ panic("trap cpu stack");
+ }
+ } else if(curproc[cpu()]){
+ if(&tf < curproc[cpu()]->kstack){
+ panic("trap kstack");
+ }
+ }
+
if(v == T_SYSCALL){
struct proc *cp = curproc[cpu()];
int num = cp->tf->eax;
if(v == (IRQ_OFFSET + IRQ_IDE)){
ide_intr();
+ if(cpus[cpu()].nlock)
+ panic("ide_intr returned while holding a lock");
+ cli(); // prevent a waiting interrupt from overflowing stack
+ lapic_eoi();
return;
}
if(v == (IRQ_OFFSET + IRQ_KBD)){
kbd_intr();
+ if(cpus[cpu()].nlock){
+ panic("kbd_intr returned while holding a lock");
+ }
+ cli(); // prevent a waiting interrupt from overflowing stack
+ lapic_eoi();
return;
}