##// END OF EJS Templates
store: use StoreEntry API instead of parsing filename in remotefilelog...
marmoute -
r51378:5805b8b2 default
parent child Browse files
Show More
@@ -1,443 +1,442 b''
1 # remotefilelogserver.py - server logic for a remotefilelog server
1 # remotefilelogserver.py - server logic for a remotefilelog server
2 #
2 #
3 # Copyright 2013 Facebook, Inc.
3 # Copyright 2013 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import os
8 import os
9 import stat
9 import stat
10 import time
10 import time
11 import zlib
11 import zlib
12
12
13 from mercurial.i18n import _
13 from mercurial.i18n import _
14 from mercurial.node import bin, hex
14 from mercurial.node import bin, hex
15 from mercurial.pycompat import open
15 from mercurial.pycompat import open
16 from mercurial import (
16 from mercurial import (
17 changegroup,
17 changegroup,
18 changelog,
18 changelog,
19 context,
19 context,
20 error,
20 error,
21 extensions,
21 extensions,
22 match,
22 match,
23 scmutil,
23 scmutil,
24 store,
24 store,
25 streamclone,
25 streamclone,
26 util,
26 util,
27 wireprotoserver,
27 wireprotoserver,
28 wireprototypes,
28 wireprototypes,
29 wireprotov1server,
29 wireprotov1server,
30 )
30 )
31 from . import (
31 from . import (
32 constants,
32 constants,
33 shallowutil,
33 shallowutil,
34 )
34 )
35
35
36 _sshv1server = wireprotoserver.sshv1protocolhandler
36 _sshv1server = wireprotoserver.sshv1protocolhandler
37
37
38
38
def setupserver(ui, repo):
    """Sets up a normal Mercurial repo so it can serve files to shallow repos."""
    onetimesetup(ui)

    # don't send files to shallow clients during pulls
    def generatefiles(
        orig, self, changedfiles, linknodes, commonrevs, source, *args, **kwargs
    ):
        caps = self._bundlecaps or []
        if constants.BUNDLE2_CAPABLITY in caps:
            # only send files that don't match the specified patterns
            includepattern = None
            excludepattern = None
            for cap in self._bundlecaps or []:
                if cap.startswith(b"includepattern="):
                    includepattern = cap[len(b"includepattern=") :].split(b'\0')
                elif cap.startswith(b"excludepattern="):
                    excludepattern = cap[len(b"excludepattern=") :].split(b'\0')

            m = match.always()
            if includepattern or excludepattern:
                m = match.match(
                    repo.root, b'', None, includepattern, excludepattern
                )

            # Drop files covered by the shallow pattern: the client fetches
            # them on demand instead of receiving them in the changegroup.
            # (Was list([...]) — the comprehension already builds a list.)
            changedfiles = [f for f in changedfiles if not m(f)]
        return orig(
            self, changedfiles, linknodes, commonrevs, source, *args, **kwargs
        )

    extensions.wrapfunction(
        changegroup.cgpacker, b'generatefiles', generatefiles
    )
72
72
73
73
# One-time setup guard: flipped to True by onetimesetup() so the wireproto
# command registration and wrapping below happen at most once per process.
onetime = False
75
75
76
76
def onetimesetup(ui):
    """Configures the wireprotocol for both clients and servers."""
    global onetime
    if onetime:
        return
    onetime = True

    # support file content requests
    wireprotov1server.wireprotocommand(
        b'x_rfl_getflogheads', b'path', permission=b'pull'
    )(getflogheads)
    wireprotov1server.wireprotocommand(
        b'x_rfl_getfiles', b'', permission=b'pull'
    )(getfiles)
    wireprotov1server.wireprotocommand(
        b'x_rfl_getfile', b'file node', permission=b'pull'
    )(getfile)

    # Mutable holder for the per-stream state toggled by stream_out_shallow.
    class streamstate:
        match = None
        shallowremote = False
        noflatmf = False

    state = streamstate()

    def stream_out_shallow(repo, proto, other):
        includepattern = None
        excludepattern = None
        raw = other.get(b'includepattern')
        if raw:
            includepattern = raw.split(b'\0')
        raw = other.get(b'excludepattern')
        if raw:
            excludepattern = raw.split(b'\0')

        # Save the previous state so concurrent/recursive use is restored.
        oldshallow = state.shallowremote
        oldmatch = state.match
        oldnoflatmf = state.noflatmf
        try:
            state.shallowremote = True
            state.match = match.always()
            state.noflatmf = other.get(b'noflatmanifest') == b'True'
            if includepattern or excludepattern:
                state.match = match.match(
                    repo.root, b'', None, includepattern, excludepattern
                )
            streamres = wireprotov1server.stream(repo, proto)

            # Force the first value to execute, so the file list is computed
            # within the try/finally scope
            first = next(streamres.gen)
            second = next(streamres.gen)

            def gen():
                yield first
                yield second
                for value in streamres.gen:
                    yield value

            return wireprototypes.streamres(gen())
        finally:
            state.shallowremote = oldshallow
            state.match = oldmatch
            state.noflatmf = oldnoflatmf

    wireprotov1server.commands[b'stream_out_shallow'] = (
        stream_out_shallow,
        b'*',
    )

    # don't clone filelogs to shallow clients
    def _walkstreamfiles(orig, repo, matcher=None):
        if state.shallowremote:
            # if we are shallow ourselves, stream our local commits
            if shallowutil.isenabled(repo):
                striplen = len(repo.store.path) + 1
                readdir = repo.store.rawvfs.readdir
                visit = [os.path.join(repo.store.path, b'data')]
                while visit:
                    p = visit.pop()
                    for f, kind, st in readdir(p, stat=True):
                        fp = p + b'/' + f
                        if kind == stat.S_IFREG:
                            if not fp.endswith(b'.i') and not fp.endswith(
                                b'.d'
                            ):
                                n = util.pconvert(fp[striplen:])
                                d = store.decodedir(n)
                                yield store.SimpleStoreEntry(
                                    unencoded_path=d,
                                    is_volatile=False,
                                    file_size=st.st_size,
                                )

                        if kind == stat.S_IFDIR:
                            visit.append(fp)

            if scmutil.istreemanifest(repo):
                # Tree manifests live in the store as manifest revlogs;
                # forward those entries unconditionally.
                for entry in repo.store.datafiles():
                    if not entry.is_revlog:
                        continue
                    if entry.revlog_type == store.FILEFLAGS_MANIFESTLOG:
                        yield entry

            # Return .d and .i files that do not match the shallow pattern
            match = state.match
            if match and not match.always():
                for entry in repo.store.datafiles():
                    if not entry.is_revlog:
                        continue
                    if not state.match(entry.target_id):
                        yield entry

            for x in repo.store.topfiles():
                if state.noflatmf and x[1][:11] == b'00manifest.':
                    continue
                yield x

        elif shallowutil.isenabled(repo):
            # don't allow cloning from a shallow repo to a full repo
            # since it would require fetching every version of every
            # file in order to create the revlogs.
            raise error.Abort(
                _(b"Cannot clone from a shallow repo to a full repo.")
            )
        else:
            for x in orig(repo, matcher):
                yield x

    extensions.wrapfunction(streamclone, b'_walkstreamfiles', _walkstreamfiles)

    # expose remotefilelog capabilities
    def _capabilities(orig, repo, proto):
        caps = orig(repo, proto)
        if shallowutil.isenabled(repo) or ui.configbool(
            b'remotefilelog', b'server'
        ):
            if isinstance(proto, _sshv1server):
                # legacy getfiles method which only works over ssh
                caps.append(constants.NETWORK_CAP_LEGACY_SSH_GETFILES)
            caps.append(b'x_rfl_getflogheads')
            caps.append(b'x_rfl_getfile')
        return caps

    extensions.wrapfunction(wireprotov1server, b'_capabilities', _capabilities)

    def _adjustlinkrev(orig, self, *args, **kwargs):
        # When generating file blobs, taking the real path is too slow on large
        # repos, so force it to just return the linkrev directly.
        repo = self._repo
        if util.safehasattr(repo, b'forcelinkrev') and repo.forcelinkrev:
            return self._filelog.linkrev(self._filelog.rev(self._filenode))
        return orig(self, *args, **kwargs)

    extensions.wrapfunction(
        context.basefilectx, b'_adjustlinkrev', _adjustlinkrev
    )

    def _iscmd(orig, cmd):
        # x_rfl_getfiles is an ssh-only streaming command; hide it from
        # the generic command dispatcher.
        if cmd == b'x_rfl_getfiles':
            return False
        return orig(cmd)

    extensions.wrapfunction(wireprotoserver, b'iscmd', _iscmd)
242
241
243
242
def _loadfileblob(repo, cachepath, path, node):
    """Return the zlib-compressed file blob for ``path``@``node``.

    The blob is served from the on-disk server cache under ``cachepath``;
    on a miss (or an empty cache file) it is rebuilt with createfileblob()
    and written back best-effort.
    """
    filecachepath = os.path.join(cachepath, path, hex(node))
    if not os.path.exists(filecachepath) or os.path.getsize(filecachepath) == 0:
        filectx = repo.filectx(path, fileid=node)
        if filectx.node() == repo.nullid:
            # Stale in-memory changelog; reload it and retry the lookup.
            repo.changelog = changelog.changelog(repo.svfs)
            filectx = repo.filectx(path, fileid=node)

        # TODO configurable compression engines
        text = zlib.compress(createfileblob(filectx))

        # everything should be user & group read/writable
        oldumask = os.umask(0o002)
        try:
            cachedir = os.path.dirname(filecachepath)
            if not os.path.exists(cachedir):
                try:
                    os.makedirs(cachedir)
                except FileExistsError:
                    pass

            fh = None
            try:
                fh = util.atomictempfile(filecachepath, b"wb")
                fh.write(text)
            except (IOError, OSError):
                # Don't abort if the user only has permission to read,
                # and not write.
                pass
            finally:
                if fh:
                    fh.close()
        finally:
            os.umask(oldumask)
    else:
        with open(filecachepath, b"rb") as fh:
            text = fh.read()
    return text
283
282
284
283
def getflogheads(repo, proto, path):
    """A server api for requesting a filelog's heads"""
    heads = repo.file(path).heads()
    return b'\n'.join(hex(head) for head in heads if head != repo.nullid)
290
289
291
290
def getfile(repo, proto, file, node):
    """A server api for requesting a particular version of a file. Can be used
    in batches to request many files at once. The return protocol is:
    <errorcode>\0<data/errormsg> where <errorcode> is 0 for success or
    non-zero for an error.

    data is a compressed blob with revlog flag and ancestors information. See
    createfileblob for its content.
    """
    if shallowutil.isenabled(repo):
        # A shallow server cannot materialize blobs it does not have.
        return b'1\0' + _(b'cannot fetch remote files from shallow repo')
    cachepath = repo.ui.config(b"remotefilelog", b"servercachepath")
    if not cachepath:
        cachepath = os.path.join(repo.path, b"remotefilelogcache")
    node = bin(node.strip())
    if node == repo.nullid:
        # Null node has no content; success with empty payload.
        return b'0\0'
    return b'0\0' + _loadfileblob(repo, cachepath, file, node)
310
309
311
310
def getfiles(repo, proto):
    """A server api for requesting particular versions of particular files."""
    if shallowutil.isenabled(repo):
        raise error.Abort(_(b'cannot fetch remote files from shallow repo'))
    if not isinstance(proto, _sshv1server):
        raise error.Abort(_(b'cannot fetch remote files over non-ssh protocol'))

    def streamer():
        # Requests arrive one per line on the ssh input stream as
        # "<40-hex node><path>"; an empty line terminates the batch.
        fin = proto._fin

        cachepath = repo.ui.config(b"remotefilelog", b"servercachepath")
        if not cachepath:
            cachepath = os.path.join(repo.path, b"remotefilelogcache")

        while True:
            request = fin.readline()[:-1]
            if not request:
                break

            node = bin(request[:40])
            if node == repo.nullid:
                yield b'0\n'
                continue

            path = request[40:]
            text = _loadfileblob(repo, cachepath, path, node)
            yield b'%d\n%s' % (len(text), text)

            # it would be better to only flush after processing a whole batch
            # but currently we don't know if there are more requests coming
            proto._fout.flush()

    return wireprototypes.streamres(streamer())
347
346
348
347
def createfileblob(filectx):
    """
    format:
    v0:
        str(len(rawtext)) + '\0' + rawtext + ancestortext
    v1:
        'v1' + '\n' + metalist + '\0' + rawtext + ancestortext
        metalist := metalist + '\n' + meta | meta
        meta := sizemeta | flagmeta
        sizemeta := METAKEYSIZE + str(len(rawtext))
        flagmeta := METAKEYFLAG + str(flag)

        note: sizemeta must exist. METAKEYFLAG and METAKEYSIZE must have a
        length of 1.
    """
    flog = filectx.filelog()
    frev = filectx.filerev()
    revlogflags = flog._revlog.flags(frev)
    if revlogflags == 0:
        # normal files
        text = filectx.data()
    else:
        # lfs, read raw revision data
        text = flog.rawdata(frev)

    repo = filectx._repo
    ancestors = [filectx]

    try:
        # Short-circuit linkrev resolution while walking ancestors
        # (see the _adjustlinkrev wrapper installed in onetimesetup).
        repo.forcelinkrev = True
        ancestors.extend(filectx.ancestors())

        ancestortext = b""
        for ancestorctx in ancestors:
            parents = ancestorctx.parents()
            p1 = p2 = repo.nullid
            if parents:
                p1 = parents[0].filenode()
            if len(parents) > 1:
                p2 = parents[1].filenode()

            copyname = b""
            rename = ancestorctx.renamed()
            if rename:
                copyname = rename[0]
            linknode = ancestorctx.node()
            ancestortext += b"%s%s%s%s%s\0" % (
                ancestorctx.filenode(),
                p1,
                p2,
                linknode,
                copyname,
            )
    finally:
        repo.forcelinkrev = False

    header = shallowutil.buildfileblobheader(len(text), revlogflags)

    return b"%s\0%s%s" % (header, text, ancestortext)
410
409
411
410
def gcserver(ui, repo):
    """Garbage-collect the server-side remotefilelog cache.

    Keeps every blob reachable from the manifests of recent heads
    (tip~25000:) and deletes any other cache file older than the
    configured ``remotefilelog.serverexpiration`` (in days).
    """
    if not repo.ui.configbool(b"remotefilelog", b"server"):
        return

    neededfiles = set()
    heads = repo.revs(b"heads(tip~25000:) - null")

    cachepath = repo.vfs.join(b"remotefilelogcache")
    for head in heads:
        mf = repo[head].manifest()
        for filename, filenode in mf.items():
            filecachepath = os.path.join(cachepath, filename, hex(filenode))
            neededfiles.add(filecachepath)

    # delete unneeded older files
    days = repo.ui.configint(b"remotefilelog", b"serverexpiration")
    expiration = time.time() - (days * 24 * 60 * 60)

    progress = ui.makeprogress(_(b"removing old server cache"), unit=b"files")
    progress.update(0)
    for root, dirs, files in os.walk(cachepath):
        for file in files:
            filepath = os.path.join(root, file)
            progress.increment()
            if filepath in neededfiles:
                continue

            # Renamed from `stat` to `st`: the original shadowed the
            # `stat` module imported at the top of this file.
            st = os.stat(filepath)
            if st.st_mtime < expiration:
                os.remove(filepath)

    progress.complete()
General Comments 0
You need to be logged in to leave comments. Login now