LD = $(TOOLPREFIX)ld
OBJCOPY = $(TOOLPREFIX)objcopy
OBJDUMP = $(TOOLPREFIX)objdump
-CFLAGS = -fno-pic -static -fno-builtin -fno-strict-aliasing -O2 -Wall -MD -ggdb -m32 -Werror -fno-omit-frame-pointer
+CFLAGS = -std=c11 -fno-pic -static -fno-builtin -fno-strict-aliasing -O2 -Wall -MD -ggdb -m32 -Werror -fno-omit-frame-pointer
CFLAGS += $(shell $(CC) -fno-stack-protector -E -x c /dev/null >/dev/null 2>&1 && echo -fno-stack-protector)
ASFLAGS = -m32 -gdwarf-2 -Wa,-divide
# FreeBSD ld wants ``elf_i386_fbsd''
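Adding -std=c11 puts GCC into strict ISO C mode, where the GNU `asm' keyword
extension is no longer recognized; only the reserved spelling __asm__ is
always available. That is why the x86.h and usertests.c hunks later in this
patch rename every `asm' to `__asm__'. A minimal illustration (hypothetical
file, not part of the patch):

    /* demo.c -- compile with: gcc -std=c11 -m32 -c demo.c */
    /* Under -std=c11 the plain `asm' spelling is a syntax error here; */
    /* the reserved __asm__ spelling works in both strict and GNU modes. */
    static inline void cpu_relax(void) { __asm__ volatile("pause"); }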
+#include <stdatomic.h>
+
#include "types.h"
#include "defs.h"
#include "param.h"
{
cprintf("cpu%d: starting %d\n", cpuid(), cpuid());
idtinit(); // load idt register
- xchg(&(mycpu()->started), 1); // tell startothers() we're up
+ atomic_store(&mycpu()->started, 1); // tell startothers() we're up -- atomically
scheduler(); // start running processes
}
lapicstartap(c->apicid, V2P(code));
// wait for cpu to finish mpmain()
- while(c->started == 0)
+ while(atomic_load(&c->started) == 0)
;
}
}
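The boot-time handshake above is a simple publish/wait on one flag: the
application processor stores 1 into started once it is up, and the boot
processor spins on a load of the same flag. The plain atomic_store and
atomic_load default to sequentially consistent ordering, which is more than
strong enough here. The same pattern, sketched as a hypothetical stand-alone
program with pthreads standing in for the second CPU (not part of xv6):

    #include <stdatomic.h>
    #include <pthread.h>
    #include <stdio.h>

    static atomic_uint started;          // plays the role of cpu->started

    static void *
    ap_main(void *arg)                   // stands in for mpenter()/mpmain()
    {
      printf("ap: starting\n");
      atomic_store(&started, 1);         // tell the waiting thread we're up
      return 0;
    }

    int
    main(void)
    {
      pthread_t ap;
      pthread_create(&ap, 0, ap_main, 0);
      while(atomic_load(&started) == 0)  // same spin as startothers()
        ;
      printf("bsp: ap is up\n");
      pthread_join(ap, 0);
      return 0;
    }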
+#include <stdatomic.h>
+
// Per-CPU state
struct cpu {
uchar apicid; // Local APIC ID
struct context *scheduler; // swtch() here to enter scheduler
struct taskstate ts; // Used by x86 to find stack for interrupt
struct segdesc gdt[NSEGS]; // x86 global descriptor table
- volatile uint started; // Has the CPU started?
+ atomic_uint started; // Has the CPU started?
int ncli; // Depth of pushcli nesting.
int intena; // Were interrupts enabled before pushcli?
struct proc *proc; // The process running on this cpu or null
// Mutual exclusion spin locks.
+#include <stdatomic.h>
#include "types.h"
#include "defs.h"
void
initlock(struct spinlock *lk, char *name)
{
+ atomic_init(&lk->locked, 0);
+
lk->name = name;
- lk->locked = 0;
lk->cpu = 0;
}
if(holding(lk))
panic("acquire");
- // The xchg is atomic.
- while(xchg(&lk->locked, 1) != 0)
+ // Use C11 atomics to acquire the lock.
+ // Atomically exchange locked with 1; if the old value was 0, we have
+ // just acquired the lock.
+ // Acquire ordering alone is all the lock really needs, but since
+ // atomic_exchange is a read-modify-write operation it is given acq_rel
+ // ordering: acquire semantics for the load half and release semantics
+ // for the store half.
+ while (atomic_exchange_explicit(&lk->locked, 1, memory_order_acq_rel) != 0)
;
- // Tell the C compiler and the processor to not move loads or stores
- // past this point, to ensure that the critical section's memory
- // references happen after the lock is acquired.
- __sync_synchronize();
-
// Record info about lock acquisition for debugging.
lk->cpu = mycpu();
getcallerpcs(&lk, lk->pcs);
lk->pcs[0] = 0;
lk->cpu = 0;
- // Tell the C compiler and the processor to not move loads or stores
- // past this point, to ensure that all the stores in the critical
- // section are visible to other cores before the lock is released.
- // Both the C compiler and the hardware may re-order loads and
- // stores; __sync_synchronize() tells them both not to.
- __sync_synchronize();
-
- // Release the lock, equivalent to lk->locked = 0.
- // This code can't use a C assignment, since it might
- // not be atomic. A real OS would use C atomics here.
- asm volatile("movl $0, %0" : "+m" (lk->locked) : );
+
+ // Use C11 atomics to release the lock.
+ // Atomically set locked back to 0, with release ordering so that all of
+ // the critical section's stores become visible to the next CPU that
+ // acquires the lock.
+ atomic_store_explicit(&lk->locked, 0, memory_order_release);
popcli();
}
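Stripped of the xv6 bookkeeping (pushcli/popcli, holding(), the debugging
fields), the locking core of this change is just an atomic exchange with
acquire semantics on the way in and an atomic store with release semantics
on the way out. A minimal stand-alone sketch of that core (hypothetical
names, not part of the patch):

    #include <stdatomic.h>

    struct slock {
      atomic_uint locked;                // 0 = free, 1 = held
    };

    static void
    slock_acquire(struct slock *lk)
    {
      // Spin until the exchange returns 0, i.e. until this caller is the
      // one that flipped the lock from free to held.
      while(atomic_exchange_explicit(&lk->locked, 1, memory_order_acq_rel) != 0)
        ;
    }

    static void
    slock_release(struct slock *lk)
    {
      // The release store makes every write done while holding the lock
      // visible to the next thread whose acquire exchange sees locked == 0.
      atomic_store_explicit(&lk->locked, 0, memory_order_release);
    }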
+#include <stdatomic.h>
+
// Mutual exclusion lock.
struct spinlock {
- uint locked; // Is the lock held?
+ atomic_uint locked; // Is the lock held?
// For debugging:
char *name; // Name of lock.
validateint(int *p)
{
int res;
- asm("mov %%esp, %%ebx\n\t"
+ __asm__("mov %%esp, %%ebx\n\t"
"mov %3, %%esp\n\t"
"int %2\n\t"
"mov %%ebx, %%esp" :
port = RTC_ADDR;
val = 0x09; /* year */
/* http://wiki.osdev.org/Inline_Assembly/Examples */
- asm volatile("outb %0,%1"::"a"(val), "d" (port));
+ __asm__ volatile("outb %0,%1"::"a"(val), "d" (port));
port = RTC_DATA;
- asm volatile("inb %1,%0" : "=a" (val) : "d" (port));
+ __asm__ volatile("inb %1,%0" : "=a" (val) : "d" (port));
printf(1, "uio: uio succeeded; test FAILED\n");
exit();
} else if(pid < 0){
{
uchar data;
- asm volatile("in %1,%0" : "=a" (data) : "d" (port));
+ __asm__ volatile("in %1,%0" : "=a" (data) : "d" (port));
return data;
}
static inline void
insl(int port, void *addr, int cnt)
{
- asm volatile("cld; rep insl" :
+ __asm__ volatile("cld; rep insl" :
"=D" (addr), "=c" (cnt) :
"d" (port), "0" (addr), "1" (cnt) :
"memory", "cc");
static inline void
outb(ushort port, uchar data)
{
- asm volatile("out %0,%1" : : "a" (data), "d" (port));
+ __asm__ volatile("out %0,%1" : : "a" (data), "d" (port));
}
static inline void
outw(ushort port, ushort data)
{
- asm volatile("out %0,%1" : : "a" (data), "d" (port));
+ __asm__ volatile("out %0,%1" : : "a" (data), "d" (port));
}
static inline void
outsl(int port, const void *addr, int cnt)
{
- asm volatile("cld; rep outsl" :
+ __asm__ volatile("cld; rep outsl" :
"=S" (addr), "=c" (cnt) :
"d" (port), "0" (addr), "1" (cnt) :
"cc");
static inline void
stosb(void *addr, int data, int cnt)
{
- asm volatile("cld; rep stosb" :
+ __asm__ volatile("cld; rep stosb" :
"=D" (addr), "=c" (cnt) :
"0" (addr), "1" (cnt), "a" (data) :
"memory", "cc");
static inline void
stosl(void *addr, int data, int cnt)
{
- asm volatile("cld; rep stosl" :
+ __asm__ volatile("cld; rep stosl" :
"=D" (addr), "=c" (cnt) :
"0" (addr), "1" (cnt), "a" (data) :
"memory", "cc");
pd[1] = (uint)p;
pd[2] = (uint)p >> 16;
- asm volatile("lgdt (%0)" : : "r" (pd));
+ __asm__ volatile("lgdt (%0)" : : "r" (pd));
}
struct gatedesc;
pd[1] = (uint)p;
pd[2] = (uint)p >> 16;
- asm volatile("lidt (%0)" : : "r" (pd));
+ __asm__ volatile("lidt (%0)" : : "r" (pd));
}
static inline void
ltr(ushort sel)
{
- asm volatile("ltr %0" : : "r" (sel));
+ __asm__ volatile("ltr %0" : : "r" (sel));
}
static inline uint
readeflags(void)
{
uint eflags;
- asm volatile("pushfl; popl %0" : "=r" (eflags));
+ __asm__ volatile("pushfl; popl %0" : "=r" (eflags));
return eflags;
}
static inline void
loadgs(ushort v)
{
- asm volatile("movw %0, %%gs" : : "r" (v));
+ __asm__ volatile("movw %0, %%gs" : : "r" (v));
}
static inline void
cli(void)
{
- asm volatile("cli");
+ __asm__ volatile("cli");
}
static inline void
sti(void)
{
- asm volatile("sti");
-}
-
-static inline uint
-xchg(volatile uint *addr, uint newval)
-{
- uint result;
-
- // The + in "+m" denotes a read-modify-write operand.
- asm volatile("lock; xchgl %0, %1" :
- "+m" (*addr), "=a" (result) :
- "1" (newval) :
- "cc");
- return result;
+ __asm__ volatile("sti");
}
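With acquire() rewritten on top of <stdatomic.h>, the hand-rolled xchg()
wrapper around `lock; xchgl' above has no callers left, so it is removed
rather than renamed. On x86 a C11 atomic exchange typically compiles down to
the same locked xchg instruction, so if an explicit replacement were ever
wanted it would look roughly like this (hypothetical helper, not part of the
patch):

    #include <stdatomic.h>

    // Roughly what the removed xchg() provided, expressed in C11 atomics.
    static inline unsigned int
    xchg_c11(atomic_uint *addr, unsigned int newval)
    {
      return atomic_exchange_explicit(addr, newval, memory_order_acq_rel);
    }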
static inline uint
rcr2(void)
{
uint val;
- asm volatile("movl %%cr2,%0" : "=r" (val));
+ __asm__ volatile("movl %%cr2,%0" : "=r" (val));
return val;
}
static inline void
lcr3(uint val)
{
- asm volatile("movl %0,%%cr3" : : "r" (val));
+ __asm__ volatile("movl %0,%%cr3" : : "r" (val));
}
//PAGEBREAK: 36