From: David Devecsery Date: Thu, 9 Apr 2020 17:17:53 +0000 (-0400) Subject: Ported to c11, switched to c11 atomics X-Git-Url: https://git.devinivas.org/?a=commitdiff_plain;h=1661a82b5986f6bbbe609dfbda53fdc0ba50d8f1;p=cs3210-lab0.git Ported to c11, switched to c11 atomics --- diff --git a/Makefile b/Makefile index 09d790c..5e48254 100644 --- a/Makefile +++ b/Makefile @@ -76,7 +76,7 @@ AS = $(TOOLPREFIX)gas LD = $(TOOLPREFIX)ld OBJCOPY = $(TOOLPREFIX)objcopy OBJDUMP = $(TOOLPREFIX)objdump -CFLAGS = -fno-pic -static -fno-builtin -fno-strict-aliasing -O2 -Wall -MD -ggdb -m32 -Werror -fno-omit-frame-pointer +CFLAGS = -std=c11 -fno-pic -static -fno-builtin -fno-strict-aliasing -O2 -Wall -MD -ggdb -m32 -Werror -fno-omit-frame-pointer CFLAGS += $(shell $(CC) -fno-stack-protector -E -x c /dev/null >/dev/null 2>&1 && echo -fno-stack-protector) ASFLAGS = -m32 -gdwarf-2 -Wa,-divide # FreeBSD ld wants ``elf_i386_fbsd'' diff --git a/main.c b/main.c index 9924e64..d7045a9 100644 --- a/main.c +++ b/main.c @@ -1,3 +1,5 @@ +#include <stdatomic.h> + #include "types.h" #include "defs.h" #include "param.h" @@ -53,7 +55,7 @@ mpmain(void) { cprintf("cpu%d: starting %d\n", cpuid(), cpuid()); idtinit(); // load idt register - xchg(&(mycpu()->started), 1); // tell startothers() we're up + atomic_store(&mycpu()->started, 1); // tell startothers() we're up -- atomically scheduler(); // start running processes } @@ -89,7 +91,7 @@ startothers(void) lapicstartap(c->apicid, V2P(code)); // wait for cpu to finish mpmain() - while(c->started == 0) + while(atomic_load(&c->started) == 0) ; } } diff --git a/proc.h b/proc.h index 1647114..0c574b9 100644 --- a/proc.h +++ b/proc.h @@ -1,10 +1,12 @@ +#include <stdatomic.h> + // Per-CPU state struct cpu { uchar apicid; // Local APIC ID struct context *scheduler; // swtch() here to enter scheduler struct taskstate ts; // Used by x86 to find stack for interrupt struct segdesc gdt[NSEGS]; // x86 global descriptor table - volatile uint started; // Has the CPU started? 
+ atomic_uint started; // Has the CPU started? int ncli; // Depth of pushcli nesting. int intena; // Were interrupts enabled before pushcli? struct proc *proc; // The process running on this cpu or null diff --git a/spinlock.c b/spinlock.c index 4020186..78f2325 100644 --- a/spinlock.c +++ b/spinlock.c @@ -1,4 +1,5 @@ // Mutual exclusion spin locks. +#include <stdatomic.h> #include "types.h" #include "defs.h" @@ -12,8 +13,9 @@ void initlock(struct spinlock *lk, char *name) { + atomic_init(&lk->locked, 0); + lk->name = name; - lk->locked = 0; lk->cpu = 0; } @@ -28,15 +30,15 @@ acquire(struct spinlock *lk) if(holding(lk)) panic("acquire"); - // The xchg is atomic. - while(xchg(&lk->locked, 1) != 0) + // Use c11 atomics to acquire the lock + // Here we atomically exchange locked with 1. If locked was 0, then we've + // just acquired the lock! + // We use the acquire release semantics (orderings). We really only want + // acquire semantics, but we are doing a read and modify operation at once + // which requires acquire (write) and release (read) ordering semantics. + while (atomic_exchange_explicit(&lk->locked, 1, memory_order_acq_rel) != 0) ; - // Tell the C compiler and the processor to not move loads or stores - // past this point, to ensure that the critical section's memory - // references happen after the lock is acquired. - __sync_synchronize(); - // Record info about lock acquisition for debugging. lk->cpu = mycpu(); getcallerpcs(&lk, lk->pcs); @@ -52,17 +54,12 @@ release(struct spinlock *lk) lk->pcs[0] = 0; lk->cpu = 0; - // Tell the C compiler and the processor to not move loads or stores - // past this point, to ensure that all the stores in the critical - // section are visible to other cores before the lock is released. - // Both the C compiler and the hardware may re-order loads and - // stores; __sync_synchronize() tells them both not to. - __sync_synchronize(); - - // Release the lock, equivalent to lk->locked = 0. 
- // This code can't use a C assignment, since it might - // not be atomic. A real OS would use C atomics here. - asm volatile("movl $0, %0" : "+m" (lk->locked) : ); + + // Use c11 atomics to release the lock. + // Here we set the locked value to 0 atomically + // We also give it "release" semantics, as we're doing an unlock + // (e.g. release) operation + atomic_store_explicit(&lk->locked, 0, memory_order_release); popcli(); } diff --git a/spinlock.h b/spinlock.h index 0a9d8e2..947aaee 100644 --- a/spinlock.h +++ b/spinlock.h @@ -1,6 +1,8 @@ +#include <stdatomic.h> + // Mutual exclusion lock. struct spinlock { - uint locked; // Is the lock held? + atomic_uint locked; // Is the lock held? // For debugging: char *name; // Name of lock. diff --git a/usertests.c b/usertests.c index a1e97e7..44e57f7 100644 --- a/usertests.c +++ b/usertests.c @@ -1550,7 +1550,7 @@ void validateint(int *p) { int res; - asm("mov %%esp, %%ebx\n\t" + __asm__("mov %%esp, %%ebx\n\t" "mov %3, %%esp\n\t" "int %2\n\t" "mov %%ebx, %%esp" : @@ -1711,9 +1711,9 @@ uio() port = RTC_ADDR; val = 0x09; /* year */ /* http://wiki.osdev.org/Inline_Assembly/Examples */ - asm volatile("outb %0,%1"::"a"(val), "d" (port)); + __asm__ volatile("outb %0,%1"::"a"(val), "d" (port)); port = RTC_DATA; - asm volatile("inb %1,%0" : "=a" (val) : "d" (port)); + __asm__ volatile("inb %1,%0" : "=a" (val) : "d" (port)); printf(1, "uio: uio succeeded; test FAILED\n"); exit(); } else if(pid < 0){ diff --git a/x86.h b/x86.h index 07312a5..6c7c8ec 100644 --- a/x86.h +++ b/x86.h @@ -5,14 +5,14 @@ inb(ushort port) { uchar data; - asm volatile("in %1,%0" : "=a" (data) : "d" (port)); + __asm__ volatile("in %1,%0" : "=a" (data) : "d" (port)); return data; } static inline void insl(int port, void *addr, int cnt) { - asm volatile("cld; rep insl" : + __asm__ volatile("cld; rep insl" : "=D" (addr), "=c" (cnt) : "d" (port), "0" (addr), "1" (cnt) : "memory", "cc"); @@ -21,19 +21,19 @@ insl(int port, void *addr, int cnt) static inline void outb(ushort 
port, uchar data) { - asm volatile("out %0,%1" : : "a" (data), "d" (port)); + __asm__ volatile("out %0,%1" : : "a" (data), "d" (port)); } static inline void outw(ushort port, ushort data) { - asm volatile("out %0,%1" : : "a" (data), "d" (port)); + __asm__ volatile("out %0,%1" : : "a" (data), "d" (port)); } static inline void outsl(int port, const void *addr, int cnt) { - asm volatile("cld; rep outsl" : + __asm__ volatile("cld; rep outsl" : "=S" (addr), "=c" (cnt) : "d" (port), "0" (addr), "1" (cnt) : "cc"); @@ -42,7 +42,7 @@ outsl(int port, const void *addr, int cnt) static inline void stosb(void *addr, int data, int cnt) { - asm volatile("cld; rep stosb" : + __asm__ volatile("cld; rep stosb" : "=D" (addr), "=c" (cnt) : "0" (addr), "1" (cnt), "a" (data) : "memory", "cc"); @@ -51,7 +51,7 @@ stosb(void *addr, int data, int cnt) static inline void stosl(void *addr, int data, int cnt) { - asm volatile("cld; rep stosl" : + __asm__ volatile("cld; rep stosl" : "=D" (addr), "=c" (cnt) : "0" (addr), "1" (cnt), "a" (data) : "memory", "cc"); @@ -68,7 +68,7 @@ lgdt(struct segdesc *p, int size) pd[1] = (uint)p; pd[2] = (uint)p >> 16; - asm volatile("lgdt (%0)" : : "r" (pd)); + __asm__ volatile("lgdt (%0)" : : "r" (pd)); } struct gatedesc; @@ -82,66 +82,53 @@ lidt(struct gatedesc *p, int size) pd[1] = (uint)p; pd[2] = (uint)p >> 16; - asm volatile("lidt (%0)" : : "r" (pd)); + __asm__ volatile("lidt (%0)" : : "r" (pd)); } static inline void ltr(ushort sel) { - asm volatile("ltr %0" : : "r" (sel)); + __asm__ volatile("ltr %0" : : "r" (sel)); } static inline uint readeflags(void) { uint eflags; - asm volatile("pushfl; popl %0" : "=r" (eflags)); + __asm__ volatile("pushfl; popl %0" : "=r" (eflags)); return eflags; } static inline void loadgs(ushort v) { - asm volatile("movw %0, %%gs" : : "r" (v)); + __asm__ volatile("movw %0, %%gs" : : "r" (v)); } static inline void cli(void) { - asm volatile("cli"); + __asm__ volatile("cli"); } static inline void sti(void) { - asm volatile("sti"); 
-} - -static inline uint -xchg(volatile uint *addr, uint newval) -{ - uint result; - - // The + in "+m" denotes a read-modify-write operand. - asm volatile("lock; xchgl %0, %1" : - "+m" (*addr), "=a" (result) : - "1" (newval) : - "cc"); - return result; + __asm__ volatile("sti"); } static inline uint rcr2(void) { uint val; - asm volatile("movl %%cr2,%0" : "=r" (val)); + __asm__ volatile("movl %%cr2,%0" : "=r" (val)); return val; } static inline void lcr3(uint val) { - asm volatile("movl %0,%%cr3" : : "r" (val)); + __asm__ volatile("movl %0,%%cr3" : : "r" (val)); } //PAGEBREAK: 36