forked from cms-patatrack/pixeltrack-standalone
-
Notifications
You must be signed in to change notification settings - Fork 0
/
GPUSimpleVector.h
138 lines (118 loc) · 3.79 KB
/
GPUSimpleVector.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
#ifndef HeterogeneousCore_CUDAUtilities_interface_GPUSimpleVector_h
#define HeterogeneousCore_CUDAUtilities_interface_GPUSimpleVector_h
// author: Felice Pantaleo, CERN, 2018
#include <new>          // placement new used by emplace_back / make_SimpleVector
#include <type_traits>
#include <utility>

#include "CUDACore/cudaCompat.h"
namespace GPU {
template <class T>
struct SimpleVector {
  // A fixed-capacity vector view over caller-owned storage.
  // The struct itself owns nothing: construct()/set_data() attach external
  // memory, and the caller is responsible for its lifetime.
  constexpr SimpleVector() = default;

  // Attach caller-owned storage and reset the element count.
  // Ownership of `data` stays with the caller.
  constexpr void construct(int capacity, T *data) {
    m_size = 0;
    m_capacity = capacity;
    m_data = data;
  }

  // Single-threaded append (NOT safe for concurrent callers).
  // Returns the index of the stored element, or -1 if the vector is full.
  inline constexpr int push_back_unsafe(const T &element) {
    auto previousSize = m_size;
    m_size++;
    if (previousSize < m_capacity) {
      m_data[previousSize] = element;
      return previousSize;
    } else {
      --m_size;  // roll back the speculative increment
      return -1;
    }
  }

  // Single-threaded in-place construction (NOT safe for concurrent callers).
  // Returns the index of the constructed element, or -1 if full.
  template <class... Ts>
  constexpr int emplace_back_unsafe(Ts &&... args) {
    auto previousSize = m_size;
    m_size++;
    if (previousSize < m_capacity) {
      (new (&m_data[previousSize]) T(std::forward<Ts>(args)...));
      return previousSize;
    } else {
      --m_size;  // roll back the speculative increment
      return -1;
    }
  }

  // Last element. Precondition: !empty(); calling on an empty vector reads
  // out of bounds (undefined behaviour).
  __device__ inline T &back() { return m_data[m_size - 1]; }

  // Const overload, same precondition as back().
  // Fix: the original returned a value-constructed temporary `T()` as a
  // `const T&` on the empty path — a reference bound to a temporary that is
  // destroyed at return, i.e. always-dangling. Mirror the non-const overload
  // and document the precondition instead.
  __device__ inline const T &back() const { return m_data[m_size - 1]; }

  // Thread-safe append for use inside a CUDA kernel.
  // Returns the claimed index, or -1 if full. Note that m_size may
  // transiently exceed m_capacity between the add and the rollback, which is
  // why all success checks compare the PREVIOUS size, not m_size itself.
  __device__ int push_back(const T &element) {
    auto previousSize = atomicAdd(&m_size, 1);
    if (previousSize < m_capacity) {
      m_data[previousSize] = element;
      return previousSize;
    } else {
      atomicSub(&m_size, 1);  // roll back the failed reservation
      return -1;
    }
  }

  // Thread-safe in-place construction for use inside a CUDA kernel.
  // Returns the claimed index, or -1 if full.
  template <class... Ts>
  __device__ int emplace_back(Ts &&... args) {
    auto previousSize = atomicAdd(&m_size, 1);
    if (previousSize < m_capacity) {
      (new (&m_data[previousSize]) T(std::forward<Ts>(args)...));
      return previousSize;
    } else {
      atomicSub(&m_size, 1);  // roll back the failed reservation
      return -1;
    }
  }

  // Thread-safe bulk reservation of `size` slots ("resize by delta").
  // Returns the first reserved index, or -1 if the request does not fit.
  // Fix: the original only checked `previousSize < m_capacity`, so a
  // multi-slot extend could succeed while slots
  // [previousSize, previousSize + size) ran past m_capacity; require the
  // whole range to fit.
  __device__ int extend(int size = 1) {
    auto previousSize = atomicAdd(&m_size, size);
    if (previousSize + size <= m_capacity) {
      return previousSize;
    } else {
      atomicSub(&m_size, size);  // roll back the failed reservation
      return -1;
    }
  }

  // Thread-safe release of `size` slots from the end.
  // Returns the new size, or -1 if fewer than `size` elements were present.
  __device__ int shrink(int size = 1) {
    auto previousSize = atomicSub(&m_size, size);
    if (previousSize >= size) {
      return previousSize - size;
    } else {
      atomicAdd(&m_size, size);  // roll back the failed removal
      return -1;
    }
  }

  inline constexpr bool empty() const { return m_size <= 0; }
  inline constexpr bool full() const { return m_size >= m_capacity; }
  inline constexpr T &operator[](int i) { return m_data[i]; }
  inline constexpr const T &operator[](int i) const { return m_data[i]; }
  // Drops all elements without destroying them or touching the storage.
  inline constexpr void reset() { m_size = 0; }
  inline constexpr int size() const { return m_size; }
  inline constexpr int capacity() const { return m_capacity; }
  inline constexpr T const *data() const { return m_data; }
  // Sets the element count directly; no bounds check against m_capacity.
  inline constexpr void resize(int size) { m_size = size; }
  // Swaps the backing storage; caller keeps ownership of `data`.
  inline constexpr void set_data(T *data) { m_data = data; }

private:
  int m_size;
  int m_capacity;
  T *m_data;
};
// ownership of m_data stays within the caller
// Build a SimpleVector<T> by value over caller-owned storage.
// Ownership of `data` stays with the caller.
template <class T>
SimpleVector<T> make_SimpleVector(int capacity, T *data) {
  SimpleVector<T> vec;
  vec.construct(capacity, data);
  return vec;
}
// ownership of m_data stays within the caller
// Placement-construct a SimpleVector<T> inside caller-provided memory `mem`
// and attach it to the element storage `data`.
// Ownership of both `mem` and `data` stays with the caller.
template <class T>
SimpleVector<T> *make_SimpleVector(SimpleVector<T> *mem, int capacity, T *data) {
  SimpleVector<T> *vec = new (mem) SimpleVector<T>();
  vec->construct(capacity, data);
  return vec;
}
} // namespace GPU
#endif // HeterogeneousCore_CUDAUtilities_interface_GPUSimpleVector_h