1 /*
2 * Copyright (c) 2022-2023 Ali Mashtizadeh
3 * All rights reserved.
4 */
5
6 #include <stdbool.h>
7 #include <stdint.h>
8 #include <string.h>
9
10 #include <sys/kassert.h>
11 #include <sys/kconfig.h>
12 #include <sys/kdebug.h>
13 #include <sys/ktime.h>
14 #include <sys/mp.h>
15 #include <sys/spinlock.h>
16
17 #include <machine/atomic.h>
18 #include <machine/amd64.h>
19 #include <machine/amd64op.h>
20
/*
 * Protects the global registry of spinlocks (lockList).  Statically
 * initialized because it must be usable before Spinlock_Init itself can
 * be called (Spinlock_Init takes this lock).
 * NOTE(review): positional initializer — the seven zeros are assumed to
 * cover the counter/timestamp fields preceding `type` and `name` in
 * struct Spinlock; confirm against sys/spinlock.h if the layout changes.
 */
Spinlock lockListLock = {
    0, 0, 0, 0, 0, 0, 0,
    SPINLOCK_TYPE_NORMAL,
    "SPINLOCK LIST",
};
/* Registry of every initialized spinlock, for the debugger commands below. */
LIST_HEAD(LockListHead, Spinlock) lockList = LIST_HEAD_INITIALIZER(lockList);

/* Per-CPU list, in acquisition order, of the spinlocks currently held. */
TAILQ_HEAD(LockStack, Spinlock) lockStack[MAX_CPUS];

/* TSC frequency used to convert spin durations to seconds (defined elsewhere). */
extern uint64_t ticksPerSecond;
31
32 void
33 Spinlock_EarlyInit()
34 {
35 int c;
36
37 for (c = 0; c < MAX_CPUS; c++) {
38 TAILQ_INIT(&lockStack[c]);
39 }
40 }
41
42 void
Spinlock_Init(Spinlock * lock,const char * name,uint64_t type)43 Spinlock_Init(Spinlock *lock, const char *name, uint64_t type)
44 {
45 lock->lock = 0;
46 lock->cpu = 0;
47 lock->count = 0;
48 lock->rCount = 0;
49 lock->lockTime = 0;
50 lock->waitTime = 0;
51 lock->type = type;
52
53 strncpy(&lock->name[0], name, SPINLOCK_NAMELEN);
54
55 Spinlock_Lock(&lockListLock);
56 LIST_INSERT_HEAD(&lockList, lock, lockList);
57 Spinlock_Unlock(&lockListLock);
58 }
59
/**
 * Spinlock_Destroy --
 *
 * Unregister a spinlock from the global lock list.  The caller is
 * presumably responsible for ensuring the lock is no longer held —
 * nothing here checks that.
 */
void
Spinlock_Destroy(Spinlock *lock)
{
    /* The list lock serializes registry updates against the debug dumps. */
    Spinlock_Lock(&lockListLock);
    LIST_REMOVE(lock, lockList);
    Spinlock_Unlock(&lockListLock);
}
67
68 /**
69 * Spinlock_Lock --
70 *
71 * Spin until we acquire the spinlock. This will also disable interrupts to
72 * prevent deadlocking with interrupt handlers.
73 */
74 void
Spinlock_Lock(Spinlock * lock)75 Spinlock_Lock(Spinlock *lock) __NO_LOCK_ANALYSIS
76 {
77 uint64_t startTSC;
78 Critical_Enter();
79
80 startTSC = Time_GetTSC();
81 while (atomic_swap_uint64(&lock->lock, 1) == 1)
82 {
83 if (lock->type == SPINLOCK_TYPE_RECURSIVE && lock->cpu == CPU()) {
84 break;
85 }
86 if ((Time_GetTSC() - startTSC) / ticksPerSecond > 1) {
87 kprintf("Spinlock_Lock(%s): waiting for over a second!\n", lock->name);
88 breakpoint();
89 }
90 }
91 lock->waitTime += Time_GetTSC() - startTSC;
92
93 lock->cpu = CPU();
94 lock->count++;
95
96 lock->rCount++;
97 if (lock->rCount == 1)
98 lock->lockedTSC = Time_GetTSC();
99
100 TAILQ_INSERT_TAIL(&lockStack[CPU()], lock, lockStack);
101 }
102
103 /**
104 * Spinlock_Unlock --
105 *
106 * Release the spinlock. This will re-enable interrupts.
107 */
108 void
Spinlock_Unlock(Spinlock * lock)109 Spinlock_Unlock(Spinlock *lock) __NO_LOCK_ANALYSIS
110 {
111 ASSERT(lock->cpu == CPU());
112
113 TAILQ_REMOVE(&lockStack[CPU()], lock, lockStack);
114
115 lock->rCount--;
116 if (lock->rCount == 0) {
117 lock->cpu = 0;
118 lock->lockTime += Time_GetTSC() - lock->lockedTSC;
119 atomic_set_uint64(&lock->lock, 0);
120 }
121
122 Critical_Exit();
123 }
124
125 bool
Spinlock_IsHeld(Spinlock * lock)126 Spinlock_IsHeld(Spinlock *lock)
127 {
128 return (lock->cpu == CPU()) && (lock->lock == 1);
129 }
130
131 void
Debug_Spinlocks(int argc,const char * argv[])132 Debug_Spinlocks(int argc, const char *argv[])
133 {
134 Spinlock *lock;
135
136 Spinlock_Lock(&lockListLock);
137
138 kprintf("%-36s Locked CPU Count WaitTime LockTime\n", "Lock Name");
139 LIST_FOREACH(lock, &lockList, lockList)
140 {
141 kprintf("%-36s %6llu %3llu %8llu %12llu %12llu\n", lock->name,
142 lock->lock, lock->cpu, lock->count,
143 lock->waitTime, lock->lockTime);
144 }
145
146 Spinlock_Unlock(&lockListLock);
147 }
148
149 REGISTER_DBGCMD(spinlocks, "Display list of spinlocks", Debug_Spinlocks);
150
151 void
Debug_LockStack(int argc,const char * argv[])152 Debug_LockStack(int argc, const char *argv[])
153 {
154 int c = CPU();
155 Spinlock *lock;
156
157 kprintf("Lock Stack:\n");
158 TAILQ_FOREACH(lock, &lockStack[c], lockStack) {
159 kprintf(" %s\n", lock->name);
160 }
161 }
162
163 REGISTER_DBGCMD(lockstack, "Display stack of held spinlocks", Debug_LockStack);
164
165