/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (c) Thomas E. Hart.
 */

#include "ebr.h"
#include "test.h"
#include "allocator.h"
#include "util.h"
#include "spinlock.h"
#include <stdio.h>

#define UPDATE_THRESHOLD 100

struct ebr_globals {
spinlock_t update_lock __attribute__ ((__aligned__(CACHESIZE)));
int global_epoch __attribute__ ((__aligned__(CACHESIZE)));
};

static struct ebr_globals *eg __attribute__ ((__aligned__(CACHESIZE)));
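
/*
 * Note (added; not in the original source): the per-thread state used below
 * is a per_thread_t reached through `this_thread` and the global `threads[]`
 * array, both declared in the project headers. Judging from its use in this
 * file, it is assumed to carry at least:
 *
 *   epoch                 - last global epoch this thread observed
 *   in_critical           - flag announcing an active critical section
 *   entries_since_update  - entries since we last tried to advance the epoch
 *   retire_count          - number of nodes waiting on the limbo lists
 *   limbo_list[N_EPOCHS]  - per-epoch lists of retired node_t's
 */
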
/* Processes a list of callbacks.
*
* @list: Pointer to list of node_t's.
*/
void process_callbacks_epoch(node_t **list)
{
node_t *next;
unsigned long num = 0;
/* This is only called from critical_enter(), which starts with a
* memory barrier; therefore, we don't need a write barrier here to
* ensure that previous deletions have been seen by all processors.
*/
for (; (*list) != NULL; (*list) = next) {
next = (*list)->mr_next;
free_node(*list);
num++;
}
/* Update our accounting information. */
this_thread.retire_count -= num;
}

void update_epoch()
{
int curr_epoch;
if (!spin_trylock(&eg->update_lock)) {
/* Someone could be preempted while holding the update lock. Give
* them the CPU. */
/* cond_yield(); */
return;
}
/* If any thread hasn't advanced to the current epoch, abort the attempt. */
curr_epoch = eg->global_epoch;
for (size_t i = 0; i < n_threads; ++i) {
if (threads[i].in_critical == 1 &&
threads[i].epoch != curr_epoch) {
spin_unlock(&eg->update_lock);
/* Give other threads a chance to see the global epoch. */
/* Only do it if out of memory. */
/* cond_yield(); */
return;
}
}
/* Update the global epoch.
*
* I wanted to use CAS here, but that would be unsafe due to
* wraparound. */
eg->global_epoch = (curr_epoch + 1) % N_EPOCHS;
spin_unlock(&eg->update_lock);
return;
}
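
/*
 * Note (added; not in the original source): the global epoch only advances
 * when every thread currently flagged in_critical has already observed the
 * present value. By the time the epoch has advanced twice past some value e,
 * every critical section that was running when a node was placed on
 * limbo_list[e] must have finished, so when a thread next sees epoch e in
 * critical_enter() it can safely free that list.
 */
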
void critical_enter()
{
	struct per_thread_t *t = &this_thread;
	int epoch;

 retry:
	t->in_critical = 1;
	memory_barrier(); /* Not safe to proceed until our flag is visible. */
	epoch = eg->global_epoch;
	if (t->epoch != epoch) { /* New epoch. */
		/* Process callbacks for the old 'incarnation' of this epoch. */
		process_callbacks_epoch(&t->limbo_list[epoch]);
		t->epoch = epoch;
		t->entries_since_update = 0;
	} else if (t->entries_since_update++ == UPDATE_THRESHOLD) {
		t->in_critical = 0;
		t->entries_since_update = 0;
		update_epoch();
		goto retry;
	}
	return; /* We're now in a critical section. */
}

void critical_exit()
{
memory_barrier(); /* Can't let flag be unset while we're still in
* a critical section! */
this_thread.in_critical = 0;
}
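
/*
 * Hypothetical read-side sketch (added for illustration; not part of the
 * original file). The traversal itself is elided; the point is that every
 * access to shared nodes is bracketed by critical_enter()/critical_exit(),
 * so nodes handed to free_node_later() by other threads are not reclaimed
 * while this thread might still hold a reference to them.
 */
static void example_reader(void)
{
	critical_enter();	/* announce the critical section; pins our epoch */
	/* ... traverse the shared data structure here ... */
	critical_exit();	/* retired nodes we saw may now be reclaimed */
}
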
void mr_init()
{
int i, j;
eg = (struct ebr_globals*)mapmem(sizeof(struct ebr_globals));
for (i = 0; i < MAX_THREADS; i++) {
threads[i].epoch = 0;
threads[i].in_critical = 0;
threads[i].entries_since_update = 0;
threads[i].retire_count = 0;
for (j = 0; j < N_EPOCHS; j++) {
threads[i].limbo_list[j] = NULL;
}
}
eg->global_epoch = 1;
eg->update_lock = SPIN_LOCK_UNLOCKED;
}

void mr_thread_exit()
{
while (this_thread.retire_count > 0) {
update_epoch();
critical_enter();
critical_exit();
cond_yield();
}
}

void mr_reinitialize()
{
int i;
for (i = 0; i < MAX_THREADS; i++) {
threads[i].epoch = 0;
threads[i].in_critical = 0;
threads[i].entries_since_update = 0;
threads[i].retire_count = 0;
}
eg->global_epoch = 1;
eg->update_lock = SPIN_LOCK_UNLOCKED;
}

/* Links the node into the per-thread list of pending deletions.
*/
void free_node_later(node_t *q)
{
	struct per_thread_t *t = &this_thread;

	q->mr_next = t->limbo_list[t->epoch];
	t->limbo_list[t->epoch] = q;
	t->retire_count++;
}
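
/*
 * Hypothetical update-side sketch (added for illustration; not part of the
 * original file). A writer that has unlinked a node from the shared
 * structure must not call free_node() directly; it retires the node with
 * free_node_later() inside its critical section, and process_callbacks_epoch()
 * frees it later, once no reader that entered before the unlink can still be
 * inside its critical section.
 */
static void example_retire(node_t *q)
{
	critical_enter();	/* enter a critical section */
	/* ... unlink q from the shared data structure here ... */
	free_node_later(q);	/* defer the actual free to a safe epoch */
	critical_exit();
}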