[syslinux:lwip] core: simple thread library

syslinux-bot for H. Peter Anvin hpa at zytor.com
Fri Apr 22 20:05:04 PDT 2011


Commit-ID:  884e5778c9b66b943eb02d8437bc0b26a219e2ec
Gitweb:     http://syslinux.zytor.com/commit/884e5778c9b66b943eb02d8437bc0b26a219e2ec
Author:     H. Peter Anvin <hpa at zytor.com>
AuthorDate: Tue, 8 Sep 2009 21:06:34 -0700
Committer:  Eric W. Biederman <ebiederm at xmission.com>
CommitDate: Fri, 8 Apr 2011 14:40:28 -0700

core: simple thread library

Simple thread library with the intent of making lwIP easier to port.
-- Modified to use milliseconds instead of jiffies,
   as lwIP expresses everything in milliseconds. EWB

Signed-off-by: H. Peter Anvin <hpa at zytor.com>
Signed-off-by: Eric W. Biederman <ebiederm at xmission.com>


---
 core/include/thread.h      |   84 ++++++++++++++++++++++++++++++++++++++++++++
 core/thread/exit_thread.c  |   26 +++++++++++++
 core/thread/idle_thread.c  |   25 +++++++++++++
 core/thread/kill_thread.c  |   39 ++++++++++++++++++++
 core/thread/root_thread.c  |    9 +++++
 core/thread/schedule.c     |   39 ++++++++++++++++++++
 core/thread/sem_asm.S      |   15 ++++++++
 core/thread/semaphore.c    |   74 ++++++++++++++++++++++++++++++++++++++
 core/thread/start_thread.c |   33 +++++++++++++++++
 core/thread/thread_asm.S   |   33 +++++++++++++++++
 core/thread/timeout.c      |   39 ++++++++++++++++++++
 11 files changed, 416 insertions(+), 0 deletions(-)

diff --git a/core/include/thread.h b/core/include/thread.h
new file mode 100644
index 0000000..917c36a
--- /dev/null
+++ b/core/include/thread.h
@@ -0,0 +1,84 @@
+#ifndef _THREAD_H
+#define _THREAD_H
+
+#include <stddef.h>
+#include <inttypes.h>
+#include "core.h"
+
+/*
+ * Simple priority-based thread library.  Lower numeric prio values are
+ * preferred by the scheduler (see __schedule); all times are expressed
+ * in milliseconds (mstime_t, presumably from core.h -- confirm).
+ */
+struct semaphore;
+
+/* Callee-saved i386 register set; saved/loaded by __switch_to (thread_asm.S). */
+struct thread_state {
+    uint32_t ebx, esp, ebp, esi, edi;
+};
+
+/* Node of a circular doubly-linked list (no separate head type). */
+struct thread_list {
+    struct thread_list *next, *prev;
+};
+
+/*
+ * Per-blocking-operation record.  Lives on the blocked thread's own
+ * stack for the duration of __sem_down_slow().
+ */
+struct thread_block {
+    struct thread_list list;		/* Link in the semaphore's wait queue */
+    struct thread *thread;		/* The blocked thread itself */
+    struct semaphore *semaphore;	/* Semaphore being waited on */
+    mstime_t block_time;		/* Timestamp when the thread blocked */
+    mstime_t timeout;			/* Absolute deadline; 0 = no timeout */
+    bool timed_out;			/* Set on timeout/kill instead of a real wakeup */
+};
+
+struct thread {
+    struct thread_state state;		/* Saved registers while not running (must stay first; thread_asm.S assumes offset 0) */
+    struct thread_list  list;		/* Link in the global ring of all threads */
+    struct thread_block *blocked;	/* Non-NULL while blocked on a semaphore */
+    int prio;				/* Lower value = higher priority */
+};
+
+void __schedule(void);		/* Pick + switch to best runnable thread; IRQs must be off */
+void __switch_to(struct thread *);	/* Raw context switch (thread_asm.S) */
+void thread_yield(void);	/* Voluntary reschedule; safe to call with IRQs on */
+
+extern struct thread *__current;
+/* Return the currently running thread. */
+static inline struct thread *current(void)
+{
+    return __current;
+}
+
+/* Counting semaphore; a negative count means waiters exist (see sem_asm.S). */
+struct semaphore {
+    int count;
+    struct thread_list list;	/* Queue of struct thread_block waiters */
+};
+
+/*
+ * sem_down: block until the semaphore is available; returns the number
+ * of milliseconds spent blocked, or -1 on timeout (timeout of 0 means
+ * wait forever).  sem_up wakes the head waiter.
+ */
+mstime_t sem_down(struct semaphore *, mstime_t);
+void sem_up(struct semaphore *);
+void sem_init(struct semaphore *, int);
+
+typedef unsigned long irq_state_t;
+
+/* Read EFLAGS (notably IF) without modifying it. */
+static inline irq_state_t irq_state(void)
+{
+    irq_state_t __st;
+
+    asm volatile("pushfl ; popl %0" : "=rm" (__st));
+    return __st;
+}
+
+/* Save EFLAGS and disable interrupts; pair with irq_restore(). */
+static inline irq_state_t irq_save(void)
+{
+    irq_state_t __st;
+
+    asm volatile("pushfl ; popl %0 ; cli" : "=rm" (__st));
+    return __st;
+}
+
+/* Restore EFLAGS previously captured by irq_save()/irq_state(). */
+static inline void irq_restore(irq_state_t __st)
+{
+    asm volatile("pushl %0 ; popfl" : : "rm" (__st));
+}
+
+/*
+ * Initialize t and make it runnable.  The new thread executes
+ * start_func(func_arg) on the supplied stack, and is terminated via
+ * __exit_thread() if start_func ever returns.
+ */
+void start_thread(struct thread *t, void *stack, size_t stack_size, int prio,
+		  void (*start_func)(void *), void *func_arg);
+void __exit_thread(void);		/* Terminate the calling thread; never returns */
+void kill_thread(struct thread *);	/* Force another thread to exit */
+
+void start_idle_thread(void);	/* Create the always-runnable, lowest-priority thread */
+void test_thread(void);
+
+#endif /* _THREAD_H */
diff --git a/core/thread/exit_thread.c b/core/thread/exit_thread.c
new file mode 100644
index 0000000..a5f12af
--- /dev/null
+++ b/core/thread/exit_thread.c
@@ -0,0 +1,26 @@
+#include "thread.h"
+#include <limits.h>
+
+/*
+ * Terminate the calling thread: unlink it from the ring of threads and
+ * hand the CPU to whatever is runnable next.  Never returns.
+ * NOTE(review): __noreturn is assumed to expand to the return type plus
+ * a noreturn attribute (check core.h / compiler.h).
+ */
+__noreturn __exit_thread(void)
+{
+    irq_state_t irq;
+    struct thread *curr = current();
+
+    irq = irq_save();
+
+    /* Remove from the linked list */
+    curr->list.prev->next = curr->list.next;
+    curr->list.next->prev = curr->list.prev;
+
+    /*
+     * Note: __schedule() can explicitly handle the case where
+     * curr isn't part of the linked list anymore, as long as
+     * curr->list.next is still valid.
+     */
+    __schedule();
+
+    /* We should never get here */
+    irq_restore(irq);
+    while (1)
+	asm volatile("hlt");
+}
diff --git a/core/thread/idle_thread.c b/core/thread/idle_thread.c
new file mode 100644
index 0000000..8a319ff
--- /dev/null
+++ b/core/thread/idle_thread.c
@@ -0,0 +1,25 @@
+#include "thread.h"
+#include <limits.h>
+#include <sys/cpu.h>
+
+/* The idle thread: always runnable, at the lowest possible priority. */
+static struct thread idle_thread;
+
+static char idle_thread_stack[4096];
+
+/*
+ * Enable interrupts, then alternately offer the CPU to other threads
+ * and halt until the next interrupt arrives.
+ */
+static void idle_thread_func(void *dummy)
+{
+    (void)dummy;
+    sti();
+
+    for (;;) {
+	thread_yield();
+	asm volatile("hlt");	/* Sleep the CPU until an interrupt */
+    }
+}
+
+/* Create the idle thread at INT_MAX prio (worse than any real thread). */
+void start_idle_thread(void)
+{
+    start_thread(&idle_thread, idle_thread_stack, sizeof idle_thread_stack,
+		 INT_MAX, idle_thread_func, NULL);
+}
+
diff --git a/core/thread/kill_thread.c b/core/thread/kill_thread.c
new file mode 100644
index 0000000..ed2e05f
--- /dev/null
+++ b/core/thread/kill_thread.c
@@ -0,0 +1,39 @@
+#include "thread.h"
+#include <limits.h>
+
+/*
+ * Force another thread to exit.  Killing yourself is just
+ * __exit_thread(); otherwise the victim's saved stack is patched so
+ * that it runs __exit_thread() the next time it is scheduled.
+ */
+void kill_thread(struct thread *thread)
+{
+    irq_state_t irq;
+    struct thread_block *block;
+
+    if (thread == current())
+	__exit_thread();
+
+    irq = irq_save();
+
+    /*
+     * Muck with the stack so that the next time the thread is run,
+     * it ends up going to __exit_thread.
+     */
+    *(size_t *)thread->state.esp = (size_t)__exit_thread;	/* Overwrite the saved return address */
+    thread->prio = INT_MIN;	/* Best possible priority: make it run (and die) ASAP */
+
+    block = thread->blocked;
+    if (block) {
+	struct semaphore *sem = block->semaphore;
+	/* Remove the victim from the wait queue and increase the count */
+	block->list.next->prev = block->list.prev;
+	block->list.prev->next = block->list.next;
+	sem->count++;
+
+	thread->blocked = NULL;
+	block->timed_out = true; /* Fake an immediate timeout */
+    }
+
+    __schedule();
+
+    irq_restore(irq);
+}
+
+
+
diff --git a/core/thread/root_thread.c b/core/thread/root_thread.c
new file mode 100644
index 0000000..c5efd65
--- /dev/null
+++ b/core/thread/root_thread.c
@@ -0,0 +1,9 @@
+#include "thread.h"
+
+/*
+ * The root thread: the context that was already executing before the
+ * thread library was used.  It starts as the only member of the
+ * circular thread ring; its register state is filled in by the first
+ * __switch_to away from it.
+ */
+struct thread __root_thread = {
+    .list = { .next = &__root_thread.list, .prev = &__root_thread.list },
+    .blocked = NULL,
+    .prio = 0,
+};
+
+/* The currently running thread; updated by __switch_to (thread_asm.S). */
+struct thread *__current = &__root_thread;
diff --git a/core/thread/schedule.c b/core/thread/schedule.c
new file mode 100644
index 0000000..b139c43
--- /dev/null
+++ b/core/thread/schedule.c
@@ -0,0 +1,39 @@
+#include <sys/cpu.h>
+#include "thread.h"
+
+/*
+ * __schedule() should only be called with interrupts locked out!
+ *
+ * Walk the full circular ring once and switch to the unblocked thread
+ * with the lowest prio value (strict '<', so ties go to the first one
+ * found after curr).
+ */
+void __schedule(void)
+{
+    struct thread *curr = current();
+    struct thread *st, *nt, *best;
+
+    best = NULL;
+
+    /*
+     * The unusual form of this walk is because we have to start with
+     * the thread *following* curr, and curr may not actually be part
+     * of the list anymore (in the case of __exit_thread).
+     */
+    nt = st = container_of(curr->list.next, struct thread, list);
+    do {
+	if (!nt->blocked)
+	    if (!best || nt->prio < best->prio)
+		best = nt;
+	nt = container_of(nt->list.next, struct thread, list);
+    } while (nt != st);
+
+    /*
+     * NOTE(review): if every thread were blocked, best would be NULL
+     * here; this relies on the always-runnable idle thread existing.
+     */
+    if (best != curr)
+	__switch_to(best);
+}
+
+/*
+ * This can be called from "normal" code...
+ * (reschedules with interrupts disabled around the decision).
+ */
+void thread_yield(void)
+{
+    irq_state_t irq = irq_save();
+    __schedule();
+    irq_restore(irq);
+}
diff --git a/core/thread/sem_asm.S b/core/thread/sem_asm.S
new file mode 100644
index 0000000..2b3014f
--- /dev/null
+++ b/core/thread/sem_asm.S
@@ -0,0 +1,15 @@
+/*
+ * Uncontended fast paths for sem_down()/sem_up().
+ * NOTE(review): %eax is assumed to carry the first C argument (the
+ * semaphore pointer), i.e. a regparm calling convention -- confirm
+ * against the core build flags.  The slow paths are tail-jumped with
+ * the argument registers intact.
+ */
+	.globl	sem_down
+	.type	sem_down, @function
+sem_down:
+	decl	(%eax)		/* Claim one unit of the count */
+	js	__sem_down_slow	/* Went negative: contended, block in C */
+	ret			/* NOTE(review): fast-path return value is not a meaningful time */
+	.size	sem_down, .-sem_down
+
+	.globl	sem_up
+	.type	sem_up, @function
+sem_up:
+	incl	(%eax)		/* Release one unit */
+	jle	__sem_up_slow	/* Still <= 0: waiters exist, wake one in C */
+	ret
+	.size	sem_up, .-sem_up
diff --git a/core/thread/semaphore.c b/core/thread/semaphore.c
new file mode 100644
index 0000000..9b216ed
--- /dev/null
+++ b/core/thread/semaphore.c
@@ -0,0 +1,74 @@
+#include <sys/cpu.h>
+#include "thread.h"
+
+/* Initialize sem with an empty wait queue and the given initial count. */
+void sem_init(struct semaphore *sem, int count)
+{
+    sem->list.next = sem->list.prev = &sem->list;
+    sem->count = count;
+}
+
+/*
+ * Slow path of sem_down(), entered (with the count already
+ * decremented by the fast path) when the semaphore is contended.
+ * Queues a thread_block allocated on the caller's own stack and blocks
+ * until woken by __sem_up_slow(), a timeout, or kill_thread().
+ *
+ * timeout is a relative delay in milliseconds; 0 means wait forever.
+ * Returns the milliseconds spent blocked, or -1 if it timed out.
+ */
+mstime_t __sem_down_slow(struct semaphore *sem, mstime_t timeout)
+{
+    struct thread *curr;
+    struct thread_block block;
+    irq_state_t irq;
+    mstime_t now;
+
+    irq = irq_save();
+
+    /* Check if something already freed the semaphore on us */
+    if (sem->count >= 0) {
+	sti();	/* NOTE(review): unconditionally enables IRQs; irq_restore(irq) would preserve the caller's state -- confirm intent */
+	return 0;
+    }
+
+    curr = current();
+    now = ms_timer();
+
+    block.thread     = curr;
+    block.semaphore  = sem;
+    block.block_time = now;
+    block.timeout    = timeout ? now+timeout : 0;	/* Relative -> absolute deadline */
+    block.timed_out  = false;
+
+    curr->blocked    = &block;
+
+    /* Add to the end of the wakeup list */
+    block.list.prev       = sem->list.prev;
+    block.list.next       = &sem->list;
+    sem->list.prev        = &block.list;
+    block.list.prev->next = &block.list;
+
+    __schedule();	/* Sleep; when we resume, block tells us why we woke */
+
+    irq_restore(irq);
+    return block.timed_out ? -1 : ms_timer() - block.block_time;
+}
+
+/*
+ * Slow path of sem_up(), entered (count already incremented) when
+ * waiters may exist: dequeue the head waiter, mark its thread
+ * runnable, and reschedule.
+ */
+void __sem_up_slow(struct semaphore *sem)
+{
+    irq_state_t irq;
+    struct thread_list *l;
+
+    irq = irq_save();
+
+    /*
+     * It's possible that something did a down on the semaphore, but
+     * didn't get to add themselves to the queue just yet.  In that case
+     * we don't have to do anything, since the bailout clause in
+     * __sem_down_slow will take care of it.
+     */
+    l = sem->list.next;
+    if (l != &sem->list) {
+	struct thread_block *block = container_of(l, struct thread_block, list);
+
+	/* Unlink the head waiter from the queue */
+	sem->list.next = block->list.next;
+	block->list.next->prev = &sem->list;
+
+	block->thread->blocked = NULL;	/* Make the waiter runnable again */
+
+	__schedule();
+    }
+
+    irq_restore(irq);
+}
diff --git a/core/thread/start_thread.c b/core/thread/start_thread.c
new file mode 100644
index 0000000..f07984f
--- /dev/null
+++ b/core/thread/start_thread.c
@@ -0,0 +1,33 @@
+#include <string.h>
+#include "thread.h"
+
+extern void (*__start_thread)(void);	/* Entry trampoline label in thread_asm.S */
+
+/*
+ * Set up thread t so it will run start_func(func_arg) on the supplied
+ * stack, link it into the run ring right after the current thread, and
+ * reschedule (so a better-priority new thread starts immediately).
+ */
+void start_thread(struct thread *t, void *stack, size_t stack_size, int prio,
+		  void (*start_func)(void *), void *func_arg)
+{
+    irq_state_t irq;
+    struct thread *curr;
+
+    memset(t, 0, sizeof *t);
+
+    /* Top of stack, dword-aligned, with one slot for the fake return address */
+    t->state.esp = (((size_t)stack + stack_size) & ~3) - 4;
+    *(size_t *)t->state.esp = (size_t)&__start_thread;	/* First __switch_to "returns" into the trampoline */
+
+    /* The trampoline expects: %esi = function, %edi = its argument */
+    t->state.esi = (size_t)start_func;
+    t->state.edi = (size_t)func_arg;
+    t->state.ebx = irq_state();	/* Inherit the IRQ state from the spawner */
+    t->prio = prio;
+
+    irq = irq_save();
+    curr = current();
+
+    /* Insert t into the circular list immediately after curr */
+    t->list.prev       = &curr->list;
+    t->list.next       = curr->list.next;
+    curr->list.next    = &t->list;
+    t->list.next->prev = &t->list;
+
+    __schedule();
+
+    irq_restore(irq);
+}
diff --git a/core/thread/thread_asm.S b/core/thread/thread_asm.S
new file mode 100644
index 0000000..64f9c9b
--- /dev/null
+++ b/core/thread/thread_asm.S
@@ -0,0 +1,33 @@
+/*
+ * void __switch_to(struct thread *t)
+ * Target thread arrives in %eax (regparm convention -- see the note in
+ * sem_asm.S).  Saves the callee-saved registers into __current->state,
+ * loads t's saved state, and makes t current.  The offsets 0..16 must
+ * match struct thread_state {ebx, esp, ebp, esi, edi}, which is the
+ * first member of struct thread.
+ */
+	.globl	__switch_to
+	.type	__switch_to, @function
+__switch_to:
+	movl	__current, %edx
+	movl	%ebx,   (%edx)
+	movl	%esp,  4(%edx)
+	movl	%ebp,  8(%edx)
+	movl	%esi, 12(%edx)
+	movl	%edi, 16(%edx)
+
+	movl	  (%eax), %ebx
+	movl	 4(%eax), %esp
+	movl	 8(%eax), %ebp
+	movl	12(%eax), %esi
+	movl	16(%eax), %edi
+	movl	%eax, __current
+	ret
+	.size	__switch_to, .-__switch_to
+
+/*
+ * Trampoline a freshly created thread "returns" into on its first
+ * switch (see start_thread): %esi = start function, %edi = argument,
+ * %ebx = initial EFLAGS for the new thread.
+ */
+	.globl	__start_thread
+	.type	__start_thread, @function
+__start_thread:
+	movl	%edi, %eax		/* Thread function argument */
+
+	pushl	$0			/* For gdb's benefit */
+	movl	%esp, %ebp		/* For gdb's benefit */
+
+	pushl	%ebx			/* Set up the flags/interrupt state */
+	popfl
+
+	call	*%esi			/* Run the desired function */
+	jmp	__exit_thread		/* If we get here, kill the thread */
+	.size	__start_thread, .-__start_thread
diff --git a/core/thread/timeout.c b/core/thread/timeout.c
new file mode 100644
index 0000000..2ca0782
--- /dev/null
+++ b/core/thread/timeout.c
@@ -0,0 +1,39 @@
+/*
+ * timeout.c
+ *
+ */
+
+#include "thread.h"
+
+/*
+ * __thread_process_timeouts()
+ *
+ * Look for threads that have timed out.  This should be called
+ * under interrupt lock, before calling __schedule().
+ */
+void __thread_process_timeouts(void)
+{
+    struct thread *curr = current();
+    struct thread_list *tp;
+    struct thread *t;
+    mstime_t now = ms_timer();
+    struct thread_block *block;
+    mstime_t timeout;
+
+    /* The current thread is obviously running, so no need to check... */
+    for (tp = curr->list.next; tp != &curr->list; tp = tp->next) {
+	t = container_of(tp, struct thread, list);
+	/* Only threads blocked with a nonzero (absolute) deadline can time out */
+	if ((block = t->blocked) && (timeout = block->timeout)) {
+	    /* Signed difference so the comparison survives timer wraparound */
+	    if ((mstimediff_t)(timeout - now) <= 0) {
+		struct semaphore *sem = block->semaphore;
+		/* Remove the waiter from the queue and increase the count */
+		block->list.next->prev = block->list.prev;
+		block->list.prev->next = block->list.next;
+		sem->count++;
+
+		t->blocked = NULL;
+		block->timed_out = true;	/* sem_down will report -1 */
+	    }
+	}
+    }
+}



More information about the Syslinux-commits mailing list