sqlite_s3vfs.py
import uuid
import apsw
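

# An apsw VFS that stores each SQLite database in an S3 bucket as a series of
# fixed-size block objects, one object per block, keyed as
# '<database name>/<zero-padded block number>'.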
class S3VFS(apsw.VFS):
def __init__(self, bucket, block_size=4096):
self.name = f's3vfs-{str(uuid.uuid4())}'
self._bucket = bucket
self._block_size = block_size
super().__init__(name=self.name, base='')
    def xAccess(self, pathname, flags):
        # A database "exists" if at least one block object is stored under its key prefix
return (
flags == apsw.mapping_access["SQLITE_ACCESS_EXISTS"]
and any(self._bucket.objects.filter(Prefix=pathname + '/'))
) or (
flags != apsw.mapping_access["SQLITE_ACCESS_EXISTS"]
)
def xFullPathname(self, filename):
return filename
def xDelete(self, filename, syncdir):
self._bucket.objects.filter(Prefix=filename + '/').delete()
def xOpen(self, name, flags):
return S3VFSFile(name, flags, self._bucket, self._block_size)
    def serialize_iter(self, key_prefix):
        # Yields the database stored under key_prefix as a stream of byte chunks
for obj in self._bucket.objects.filter(Prefix=key_prefix + '/'):
yield from obj.get()['Body'].iter_chunks()
    def serialize_fileobj(self, key_prefix):
        # Wraps serialize_iter in a read()-able file-like object
chunk = b''
offset = 0
it = iter(self.serialize_iter(key_prefix))
def up_to_iter(num):
nonlocal chunk, offset
while num:
if offset == len(chunk):
try:
chunk = next(it)
except StopIteration:
break
else:
offset = 0
to_yield = min(num, len(chunk) - offset)
offset = offset + to_yield
num -= to_yield
yield chunk[offset - to_yield:offset]
class FileLikeObj:
def read(self, n=-1):
n = \
n if n != -1 else \
4294967294 * 65536 # max size of SQLite file
return b''.join(up_to_iter(n))
return FileLikeObj()
    def deserialize_iter(self, key_prefix, bytes_iter):
        # Re-chunks an iterable of bytes into fixed-size blocks and uploads each
        # block as a separate object under key_prefix
chunk = b''
offset = 0
it = iter(bytes_iter)
def up_to_iter(num):
nonlocal chunk, offset
while num:
if offset == len(chunk):
try:
chunk = next(it)
except StopIteration:
break
else:
offset = 0
to_yield = min(num, len(chunk) - offset)
offset = offset + to_yield
num -= to_yield
yield chunk[offset - to_yield:offset]
def block_bytes_iter():
while True:
block = b''.join(up_to_iter(self._block_size))
if not block:
break
yield block
for block, block_bytes in enumerate(block_bytes_iter()):
self._bucket.Object(f'{key_prefix}/{block:010d}').put(Body=block_bytes)
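

# Sketch of copying a whole database out of or into the bucket with the helpers
# above (illustrative only: 's3vfs' is an assumed S3VFS instance and
# 'example.sqlite' an assumed key prefix):
#
#   import shutil
#
#   # Download every block of 'example.sqlite' into a single local file
#   with open('local.sqlite', 'wb') as f:
#       shutil.copyfileobj(s3vfs.serialize_fileobj(key_prefix='example.sqlite'), f)
#
#   # Upload a local file to the bucket as fixed-size block objects
#   with open('local.sqlite', 'rb') as f:
#       s3vfs.deserialize_iter(key_prefix='example.sqlite',
#                              bytes_iter=iter(lambda: f.read(65536), b''))
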
class S3VFSFile:
def __init__(self, name, flags, bucket, block_size):
        self._key_prefix = \
            name.filename() if isinstance(name, apsw.URIFilename) else \
            name
self._bucket = bucket
self._block_size = block_size
def _blocks(self, offset, amount):
while amount > 0:
block = offset // self._block_size # which block to get
start = offset % self._block_size # place in block to start
consume = min(self._block_size - start, amount)
yield (block, start, consume)
amount -= consume
offset += consume
def _block_object(self, block):
return self._bucket.Object(f'{self._key_prefix}/{block:010d}')
    def _block_bytes(self, block):
        # A block that has not been written yet reads as empty bytes
        try:
            block_bytes = self._block_object(block).get()["Body"].read()
        except self._bucket.meta.client.exceptions.NoSuchKey:
            block_bytes = b''
return block_bytes
def xRead(self, amount, offset):
def _read():
for block, start, consume in self._blocks(offset, amount):
block_bytes = self._block_bytes(block)
yield block_bytes[start:start+consume]
return b"".join(_read())
def xSectorSize(self):
return 0
def xFileControl(self, *args):
return False
def xCheckReservedLock(self):
return False
def xLock(self, level):
pass
def xUnlock(self, level):
pass
def xClose(self):
pass
def xFileSize(self):
return sum(o.size for o in self._bucket.objects.filter(Prefix=self._key_prefix + "/"))
def xSync(self, flags):
return True
    def xTruncate(self, newsize):
        # Deletes whole trailing blocks and shortens the final partial block so that
        # the total stored size becomes newsize
total = 0
for obj in self._bucket.objects.filter(Prefix=self._key_prefix + "/"):
total += obj.size
to_keep = max(obj.size - total + newsize, 0)
if to_keep == 0:
obj.delete()
elif to_keep < obj.size:
obj.put(Body=obj.get()['Body'].read()[:to_keep])
return True
def xWrite(self, data, offset):
lock_page_offset = 1073741824
page_size = len(data)
if offset == lock_page_offset + page_size:
# Ensure the previous blocks have enough bytes for size calculations and serialization.
# SQLite seems to always write pages sequentially, except that it skips the byte-lock
# page, so we only check previous blocks if we know we're just after the byte-lock
# page.
data_first_block = offset // self._block_size
lock_page_block = lock_page_offset // self._block_size
for block in range(data_first_block - 1, lock_page_block - 1, -1):
original_block_bytes = self._block_bytes(block)
if len(original_block_bytes) == self._block_size:
break
self._block_object(block).put(Body=original_block_bytes + bytes(
self._block_size - len(original_block_bytes)
))
data_offset = 0
for block, start, write in self._blocks(offset, len(data)):
data_to_write = data[data_offset:data_offset+write]
if start != 0 or len(data_to_write) != self._block_size:
original_block_bytes = self._block_bytes(block)
original_block_bytes = original_block_bytes + bytes(max(start - len(original_block_bytes), 0))
data_to_write = \
original_block_bytes[0:start] + \
data_to_write + \
original_block_bytes[start+write:]
data_offset += write
self._block_object(block).put(Body=data_to_write)
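

# Example usage sketch (illustrative only: assumes boto3 is installed, credentials
# are available to it, and a bucket named 'my-bucket' already exists):
#
#   import apsw
#   import boto3
#
#   bucket = boto3.Session().resource('s3').Bucket('my-bucket')
#   s3vfs = S3VFS(bucket=bucket)
#
#   connection = apsw.Connection('example.sqlite', vfs=s3vfs.name)
#   cursor = connection.cursor()
#   cursor.execute('CREATE TABLE foo (x, y)')
#   cursor.execute('INSERT INTO foo VALUES (1, 2)')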