/*
 * Copyright (c) 2013-2023 Ali Mashtizadeh
 * All rights reserved.
 */

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#include <errno.h>
#include <sys/syscall.h>

#include <sys/kassert.h>
#include <sys/kconfig.h>
#include <sys/kdebug.h>
#include <sys/kmem.h>
#include <sys/ktime.h>
#include <sys/mp.h>
#include <sys/spinlock.h>
#include <sys/thread.h>

#include <machine/trap.h>
#include <machine/pmap.h>

/*
 * Unfortunately, the thread, process, and scheduler code are tightly
 * coupled.  To avoid polluting the global namespace we import a few symbols
 * from sched.c and process.c that are required during initialization and
 * regular execution of the thread code.
 */

/* Globals declared in sched.c */
extern Spinlock schedLock;
extern ThreadQueue waitQueue;
extern ThreadQueue runnableQueue;
extern Thread *curProc[MAX_CPUS];

/* Globals declared in process.c */
extern Spinlock procLock;
extern uint64_t nextProcessID;
extern ProcessQueue processList;
extern Slab processSlab;

// Special Kernel Process
Process *kernelProcess;

// Memory Pools
Slab threadSlab;

void Handle_GlobalInit();

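/**
 * Thread_Init --
 *
 * Initialize the thread and process subsystems: set up the slab allocators,
 * locks, and scheduler queues, create the kernel process, and wrap the boot
 * context in a thread of the 'init' process on CPU 0.
 */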
void
Thread_Init()
{
    nextProcessID = 1;

    Slab_Init(&processSlab, "Process Objects", sizeof(Process), 16);
    Slab_Init(&threadSlab, "Thread Objects", sizeof(Thread), 16);

    Spinlock_Init(&procLock, "Process List Lock", SPINLOCK_TYPE_NORMAL);
    Spinlock_Init(&schedLock, "Scheduler Lock", SPINLOCK_TYPE_RECURSIVE);

    TAILQ_INIT(&waitQueue);
    TAILQ_INIT(&runnableQueue);
    TAILQ_INIT(&processList);

    Handle_GlobalInit();

    // Kernel Process
    kernelProcess = Process_Create(NULL, "kernel");

    // Create a thread object for the current context
    Process *proc = Process_Create(NULL, "init");
    curProc[0] = Thread_Create(proc);
    curProc[0]->schedState = SCHED_STATE_RUNNING;
}

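/**
 * Thread_InitAP --
 *
 * Create a kernel thread to represent the context currently running on an
 * application processor and mark it as the running thread for that CPU.
 */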
void
Thread_InitAP()
{
    Thread *apthr = Thread_Create(kernelProcess);

    apthr->schedState = SCHED_STATE_RUNNING;

    //PAlloc_Release((void *)thr->kstack);
    //thr->kstack = 0;

    curProc[CPU()] = apthr;
}

/*
 * Thread
 */

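/**
 * Thread_Create --
 *
 * Create a thread within the given process, allocating a kernel stack and
 * reserving a user stack region from the process's address space.
 *
 * @param [in] proc Process that the new thread belongs to.
 *
 * @retval NULL if the thread could not be allocated.
 */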
Thread *
Thread_Create(Process *proc)
{
    Thread *thr = (Thread *)Slab_Alloc(&threadSlab);

    if (!thr)
        return NULL;

    memset(thr, 0, sizeof(*thr));

    ASSERT(proc != NULL);

    thr->tid = proc->nextThreadID++;
    thr->kstack = (uintptr_t)PAlloc_AllocPage();
    if (thr->kstack == 0) {
        Slab_Free(&threadSlab, thr);
        return NULL;
    }

    Process_Retain(proc);

    Spinlock_Lock(&proc->lock);
    thr->proc = proc;
    proc->threads++;
    TAILQ_INSERT_TAIL(&proc->threadList, thr, threadList);
    thr->space = proc->space;
    thr->ustack = proc->ustackNext;
    proc->ustackNext += MEM_USERSPACE_STKLEN;
    Spinlock_Unlock(&proc->lock);

    thr->schedState = SCHED_STATE_NULL;
    thr->timerEvt = NULL;
    thr->refCount = 1;

    Thread_InitArch(thr);
    // Initialize queue

    return thr;
}

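/**
 * Thread_KThreadCreate --
 *
 * Create a kernel thread that starts execution at the given function.
 *
 * @param [in] f Entry point for the new kernel thread.
 * @param [in] arg Argument passed to the entry point.
 *
 * @retval NULL if the thread could not be allocated.
 */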
Thread *
Thread_KThreadCreate(void (*f)(void *), void *arg)
{
    Thread *thr = Thread_Create(kernelProcess);
    if (!thr)
        return NULL;

    Thread_SetupKThread(thr, f, (uintptr_t)arg, 0, 0);

    return thr;
}

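/**
 * Thread_UThreadCreate --
 *
 * Create a userspace thread in the same process and address space as an
 * existing thread, and map a fresh user stack for it.
 *
 * @param [in] oldThr Thread whose process and address space the new thread shares.
 * @param [in] rip Userspace instruction pointer at which the thread begins.
 * @param [in] arg Argument passed to the new thread.
 *
 * @retval NULL if the thread could not be allocated.
 */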
Thread *
Thread_UThreadCreate(Thread *oldThr, uint64_t rip, uint64_t arg)
{
    Process *proc = oldThr->proc;
    Thread *thr = (Thread *)Slab_Alloc(&threadSlab);

    if (!thr)
        return NULL;

    memset(thr, 0, sizeof(*thr));

    thr->tid = proc->nextThreadID++;
    thr->kstack = (uintptr_t)PAlloc_AllocPage();
    if (thr->kstack == 0) {
        Slab_Free(&threadSlab, thr);
        return NULL;
    }

    thr->space = oldThr->space;
    thr->schedState = SCHED_STATE_NULL;
    thr->refCount = 1;

    Spinlock_Lock(&proc->lock);
    thr->ustack = proc->ustackNext;
    proc->ustackNext += MEM_USERSPACE_STKLEN;
    Spinlock_Unlock(&proc->lock);

    PMap_AllocMap(thr->space, thr->ustack, MEM_USERSPACE_STKLEN, PTE_W);
    // XXX: Check failure

    Thread_InitArch(thr);
    // Initialize queue

    Thread_SetupUThread(thr, rip, arg);

    Process_Retain(proc);

    Spinlock_Lock(&proc->lock);
    thr->proc = proc;
    // XXX: Process lock
    proc->threads++;
    TAILQ_INSERT_TAIL(&proc->threadList, thr, threadList);
    Spinlock_Unlock(&proc->lock);

    return thr;
}

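/**
 * Thread_Destroy --
 *
 * Free a thread once its reference count drops to zero.  Detaches the thread
 * from its process, releases the kernel stack, and drops the process
 * reference.
 */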
static void
Thread_Destroy(Thread *thr)
{
    Process *proc = thr->proc;

    // Don't free kernel threads
    ASSERT(proc->pid != 1);

    // Free userspace stack

    Spinlock_Lock(&proc->lock);
    proc->threads--;
    TAILQ_REMOVE(&proc->threadList, thr, threadList);
    Spinlock_Unlock(&proc->lock);

    // Free AS
    PAlloc_Release((void *)thr->kstack);

    // Release process handle
    Process_Release(thr->proc);

    Slab_Free(&threadSlab, thr);
}

/**
 * Thread_Lookup --
 *
 * Lookup a thread by TID and increment its reference count.
 *
 * @param [in] proc Process within which to find a specific thread.
 * @param [in] tid Thread ID of the thread to find.
 *
 * @retval NULL if the thread isn't found.
 */
Thread *
Thread_Lookup(Process *proc, uint64_t tid)
{
    Thread *t;
    Thread *thr = NULL;

    Spinlock_Lock(&proc->lock);
    TAILQ_FOREACH(t, &proc->threadList, threadList) {
        if (t->tid == tid) {
            Thread_Retain(t);
            thr = t;
            break;
        }
    }
    Spinlock_Unlock(&proc->lock);

    return thr;
}

/**
 * Thread_Retain --
 *
 * Increment the reference count for a given thread.
 */
void
Thread_Retain(Thread *thr)
{
    ASSERT(thr->refCount != 0);
    __sync_fetch_and_add(&thr->refCount, 1);
}

/**
 * Thread_Release --
 *
 * Decrement the reference count for a given thread and destroy the thread
 * once the count reaches zero.
 */
void
Thread_Release(Thread *thr)
{
    ASSERT(thr->refCount != 0);
    if (__sync_fetch_and_sub(&thr->refCount, 1) == 1) {
        Thread_Destroy(thr);
    }
}

/**
 * Thread_Wait --
 *
 * Wait for any thread (tid == TID_ANY) or a specific thread.
 */
uint64_t
Thread_Wait(Thread *thr, uint64_t tid)
{
    Thread *t;
    uint64_t status;

    ASSERT(thr->proc != NULL);

    if (tid == TID_ANY) {
        t = TAILQ_FIRST(&thr->proc->zombieQueue);
        if (!t) {
            return SYSCALL_PACK(EAGAIN, 0);
        }

        TAILQ_REMOVE(&thr->proc->zombieQueue, t, schedQueue);
        status = t->exitValue;
        Thread_Release(t);
        return SYSCALL_PACK(0, status);
    }

    // XXXURGENT
    TAILQ_FOREACH(t, &thr->proc->zombieQueue, schedQueue) {
        if (t->tid == tid) {
            TAILQ_REMOVE(&thr->proc->zombieQueue, t, schedQueue);
            status = t->exitValue;
            Thread_Release(t);
            return SYSCALL_PACK(0, status);
        }
    }

    return 0;
}

extern TaskStateSegment64 TSS[MAX_CPUS];

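/**
 * ThreadKThreadEntry --
 *
 * Entry path for newly created threads.  Points the TSS at the thread's
 * kernel stack, releases the scheduler lock, and pops the trap frame to
 * begin execution.
 */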
void
ThreadKThreadEntry(TrapFrame *tf) __NO_LOCK_ANALYSIS
{
    TSS[CPU()].rsp0 = curProc[CPU()]->kstack + 4096;

    Spinlock_Unlock(&schedLock);

    Trap_Pop(tf);
}

/*
 * Debugging
 */

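/**
 * Thread_Dump --
 *
 * Print a thread's state to the console for debugging.
 */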
void
Thread_Dump(Thread *thr)
{
    const char *states[] = {
        "NULL",
        "RUNNABLE",
        "RUNNING",
        "WAITING",
        "ZOMBIE"
    };

    // Thread_DumpArch(thr)
    kprintf("space      %016llx\n", thr->space);
    kprintf("kstack     %016llx\n", thr->kstack);
    kprintf("tid        %llu\n", thr->tid);
    kprintf("refCount   %d\n", thr->refCount);
    kprintf("state      %s\n", states[thr->schedState]);
    kprintf("ctxswtch   %llu\n", thr->ctxSwitches);
    kprintf("utime      %llu\n", thr->userTime);
    kprintf("ktime      %llu\n", thr->kernTime);
    kprintf("wtime      %llu\n", thr->waitTime);
    if (thr->proc) {
        Process_Dump(thr->proc);
    }
}

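/**
 * Debug_Threads --
 *
 * Debugger command that dumps every running, runnable, and waiting thread.
 */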
static void
Debug_Threads(int argc, const char *argv[])
{
    Thread *thr;

    //Spinlock_Lock(&threadLock);

    for (int i = 0; i < MAX_CPUS; i++) {
        thr = curProc[i];
        if (thr) {
            kprintf("Running Thread CPU %d: %llu(%016llx) %llu\n", i, thr->tid, thr, thr->ctxSwitches);
            Thread_Dump(thr);
        }
    }
    TAILQ_FOREACH(thr, &runnableQueue, schedQueue)
    {
        kprintf("Runnable Thread: %llu(%016llx) %llu\n", thr->tid, thr, thr->ctxSwitches);
        Thread_Dump(thr);
    }
    TAILQ_FOREACH(thr, &waitQueue, schedQueue)
    {
        kprintf("Waiting Thread: %llu(%016llx) %llu\n", thr->tid, thr, thr->ctxSwitches);
        Thread_Dump(thr);
    }

    //Spinlock_Unlock(&threadLock);
}

REGISTER_DBGCMD(threads, "Display list of threads", Debug_Threads);

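/**
 * Debug_ThreadInfo --
 *
 * Debugger command that dumps the thread running on the current CPU.
 */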
static void
Debug_ThreadInfo(int argc, const char *argv[])
{
    Thread *thr = curProc[CPU()];

    kprintf("Current Thread State:\n");
    Thread_Dump(thr);
}

REGISTER_DBGCMD(threadinfo, "Display current thread state", Debug_ThreadInfo);