##// END OF EJS Templates
remotefilelog: fix what looks like a wrong refactoring...
Valentin Gatien-Baron -
r48601:6802422a stable
parent child Browse files
Show More
@@ -1,441 +1,441 b''
1 # remotefilelogserver.py - server logic for a remotefilelog server
1 # remotefilelogserver.py - server logic for a remotefilelog server
2 #
2 #
3 # Copyright 2013 Facebook, Inc.
3 # Copyright 2013 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from __future__ import absolute_import
7 from __future__ import absolute_import
8
8
9 import errno
9 import errno
10 import os
10 import os
11 import stat
11 import stat
12 import time
12 import time
13 import zlib
13 import zlib
14
14
15 from mercurial.i18n import _
15 from mercurial.i18n import _
16 from mercurial.node import bin, hex
16 from mercurial.node import bin, hex
17 from mercurial.pycompat import open
17 from mercurial.pycompat import open
18 from mercurial import (
18 from mercurial import (
19 changegroup,
19 changegroup,
20 changelog,
20 changelog,
21 context,
21 context,
22 error,
22 error,
23 extensions,
23 extensions,
24 match,
24 match,
25 pycompat,
25 pycompat,
26 scmutil,
26 scmutil,
27 store,
27 store,
28 streamclone,
28 streamclone,
29 util,
29 util,
30 wireprotoserver,
30 wireprotoserver,
31 wireprototypes,
31 wireprototypes,
32 wireprotov1server,
32 wireprotov1server,
33 )
33 )
34 from . import (
34 from . import (
35 constants,
35 constants,
36 shallowutil,
36 shallowutil,
37 )
37 )
38
38
39 _sshv1server = wireprotoserver.sshv1protocolhandler
39 _sshv1server = wireprotoserver.sshv1protocolhandler
40
40
41
41
def setupserver(ui, repo):
    """Sets up a normal Mercurial repo so it can serve files to shallow repos."""
    onetimesetup(ui)

    # don't send files to shallow clients during pulls
    def generatefiles(
        orig, self, changedfiles, linknodes, commonrevs, source, *args, **kwargs
    ):
        caps = self._bundlecaps or []
        if constants.BUNDLE2_CAPABLITY not in caps:
            # not a remotefilelog-aware client; send files normally
            return orig(
                self, changedfiles, linknodes, commonrevs, source, *args, **kwargs
            )

        # only send files that don't match the specified patterns
        includepattern = None
        excludepattern = None
        for cap in caps:
            if cap.startswith(b"includepattern="):
                includepattern = cap[len(b"includepattern=") :].split(b'\0')
            elif cap.startswith(b"excludepattern="):
                excludepattern = cap[len(b"excludepattern=") :].split(b'\0')

        if includepattern or excludepattern:
            matcher = match.match(
                repo.root, b'', None, includepattern, excludepattern
            )
        else:
            # matches everything, so every file is filtered out below
            matcher = match.always()

        changedfiles = [f for f in changedfiles if not matcher(f)]
        return orig(
            self, changedfiles, linknodes, commonrevs, source, *args, **kwargs
        )

    extensions.wrapfunction(
        changegroup.cgpacker, b'generatefiles', generatefiles
    )
75
75
76
76
# Process-wide guard so onetimesetup() registers its wireproto wrappers
# exactly once, no matter how many repos are set up.
onetime = False
78
78
79
79
def onetimesetup(ui):
    """Configures the wireprotocol for both clients and servers.

    Registers the remotefilelog wire commands, the shallow stream clone
    command, and wraps several core functions so a normal server can talk
    to shallow clients. Guarded by the module-level ``onetime`` flag so the
    registration happens only once per process.
    """
    global onetime
    if onetime:
        return
    onetime = True

    # support file content requests
    wireprotov1server.wireprotocommand(
        b'x_rfl_getflogheads', b'path', permission=b'pull'
    )(getflogheads)
    wireprotov1server.wireprotocommand(
        b'x_rfl_getfiles', b'', permission=b'pull'
    )(getfiles)
    wireprotov1server.wireprotocommand(
        b'x_rfl_getfile', b'file node', permission=b'pull'
    )(getfile)

    class streamstate(object):
        # per-request state shared between stream_out_shallow and the
        # wrapped _walkstreamfiles below
        match = None
        shallowremote = False
        noflatmf = False

    state = streamstate()

    def stream_out_shallow(repo, proto, other):
        """Stream-clone entry point for shallow clients.

        Parses the client's include/exclude patterns, flags the stream as
        shallow for the duration of the response generation, and restores
        the previous state afterwards.
        """
        includepattern = None
        excludepattern = None
        raw = other.get(b'includepattern')
        if raw:
            includepattern = raw.split(b'\0')
        raw = other.get(b'excludepattern')
        if raw:
            excludepattern = raw.split(b'\0')

        oldshallow = state.shallowremote
        oldmatch = state.match
        oldnoflatmf = state.noflatmf
        try:
            state.shallowremote = True
            state.match = match.always()
            state.noflatmf = other.get(b'noflatmanifest') == b'True'
            if includepattern or excludepattern:
                state.match = match.match(
                    repo.root, b'', None, includepattern, excludepattern
                )
            streamres = wireprotov1server.stream(repo, proto)

            # Force the first value to execute, so the file list is computed
            # within the try/finally scope
            first = next(streamres.gen)
            second = next(streamres.gen)

            def gen():
                yield first
                yield second
                for value in streamres.gen:
                    yield value

            return wireprototypes.streamres(gen())
        finally:
            state.shallowremote = oldshallow
            state.match = oldmatch
            state.noflatmf = oldnoflatmf

    wireprotov1server.commands[b'stream_out_shallow'] = (
        stream_out_shallow,
        b'*',
    )

    # don't clone filelogs to shallow clients
    def _walkstreamfiles(orig, repo, matcher=None):
        if state.shallowremote:
            # if we are shallow ourselves, stream our local commits
            if shallowutil.isenabled(repo):
                striplen = len(repo.store.path) + 1
                readdir = repo.store.rawvfs.readdir
                visit = [os.path.join(repo.store.path, b'data')]
                while visit:
                    p = visit.pop()
                    for f, kind, st in readdir(p, stat=True):
                        fp = p + b'/' + f
                        if kind == stat.S_IFREG:
                            if not fp.endswith(b'.i') and not fp.endswith(
                                b'.d'
                            ):
                                n = util.pconvert(fp[striplen:])
                                d = store.decodedir(n)
                                t = store.FILETYPE_OTHER
                                yield (t, d, n, st.st_size)
                        if kind == stat.S_IFDIR:
                            visit.append(fp)

            if scmutil.istreemanifest(repo):
                for (t, u, e, s) in repo.store.datafiles():
                    if u.startswith(b'meta/') and (
                        u.endswith(b'.i') or u.endswith(b'.d')
                    ):
                        yield (t, u, e, s)

            # Return .d and .i files that do not match the shallow pattern
            curmatch = state.match
            if curmatch and not curmatch.always():
                for (t, u, e, s) in repo.store.datafiles():
                    f = u[5:-2]  # trim data/...  and .i/.d
                    if not state.match(f):
                        yield (t, u, e, s)

            for x in repo.store.topfiles():
                # topfiles() entries lead with the file type; the name to
                # test is x[1], not x[0] (fixes a wrong refactoring that
                # compared the type against b'00manifest.')
                if state.noflatmf and x[1][:11] == b'00manifest.':
                    continue
                yield x

        elif shallowutil.isenabled(repo):
            # don't allow cloning from a shallow repo to a full repo
            # since it would require fetching every version of every
            # file in order to create the revlogs.
            raise error.Abort(
                _(b"Cannot clone from a shallow repo to a full repo.")
            )
        else:
            for x in orig(repo, matcher):
                yield x

    extensions.wrapfunction(streamclone, b'_walkstreamfiles', _walkstreamfiles)

    # expose remotefilelog capabilities
    def _capabilities(orig, repo, proto):
        caps = orig(repo, proto)
        if shallowutil.isenabled(repo) or ui.configbool(
            b'remotefilelog', b'server'
        ):
            if isinstance(proto, _sshv1server):
                # legacy getfiles method which only works over ssh
                caps.append(constants.NETWORK_CAP_LEGACY_SSH_GETFILES)
            caps.append(b'x_rfl_getflogheads')
            caps.append(b'x_rfl_getfile')
        return caps

    extensions.wrapfunction(wireprotov1server, b'_capabilities', _capabilities)

    def _adjustlinkrev(orig, self, *args, **kwargs):
        # When generating file blobs, taking the real path is too slow on large
        # repos, so force it to just return the linkrev directly.
        repo = self._repo
        if util.safehasattr(repo, b'forcelinkrev') and repo.forcelinkrev:
            return self._filelog.linkrev(self._filelog.rev(self._filenode))
        return orig(self, *args, **kwargs)

    extensions.wrapfunction(
        context.basefilectx, b'_adjustlinkrev', _adjustlinkrev
    )

    def _iscmd(orig, cmd):
        # x_rfl_getfiles is ssh-only; hide it from the HTTP command check
        if cmd == b'x_rfl_getfiles':
            return False
        return orig(cmd)

    extensions.wrapfunction(wireprotoserver, b'iscmd', _iscmd)
239
239
240
240
def _loadfileblob(repo, cachepath, path, node):
    """Return the zlib-compressed file blob for ``path``@``node``.

    Serves from the on-disk server cache under ``cachepath`` when a
    non-empty entry exists; otherwise generates the blob, best-effort
    writes it back to the cache, and returns it.
    """
    filecachepath = os.path.join(cachepath, path, hex(node))

    if os.path.exists(filecachepath) and os.path.getsize(filecachepath) != 0:
        # cache hit: return the previously generated blob as-is
        with open(filecachepath, b"rb") as fh:
            return fh.read()

    filectx = repo.filectx(path, fileid=node)
    if filectx.node() == repo.nullid:
        # stale in-memory changelog; reload it and look the file up again
        repo.changelog = changelog.changelog(repo.svfs)
        filectx = repo.filectx(path, fileid=node)

    text = createfileblob(filectx)
    # TODO configurable compression engines
    text = zlib.compress(text)

    # everything should be user & group read/writable
    oldumask = os.umask(0o002)
    try:
        dirname = os.path.dirname(filecachepath)
        if not os.path.exists(dirname):
            try:
                os.makedirs(dirname)
            except OSError as ex:
                if ex.errno != errno.EEXIST:
                    raise

        f = None
        try:
            f = util.atomictempfile(filecachepath, b"wb")
            f.write(text)
        except (IOError, OSError):
            # Don't abort if the user only has permission to read,
            # and not write.
            pass
        finally:
            if f:
                f.close()
    finally:
        os.umask(oldumask)

    return text
281
281
282
282
def getflogheads(repo, proto, path):
    """A server api for requesting a filelog's heads"""
    filelog = repo.file(path)
    hexheads = [hex(h) for h in filelog.heads() if h != repo.nullid]
    return b'\n'.join(hexheads)
288
288
289
289
def getfile(repo, proto, file, node):
    """A server api for requesting a particular version of a file. Can be used
    in batches to request many files at once. The return protocol is:
    <errorcode>\0<data/errormsg> where <errorcode> is 0 for success or
    non-zero for an error.

    data is a compressed blob with revlog flag and ancestors information. See
    createfileblob for its content.
    """
    if shallowutil.isenabled(repo):
        return b'1\0' + _(b'cannot fetch remote files from shallow repo')
    cachepath = repo.ui.config(
        b"remotefilelog", b"servercachepath"
    ) or os.path.join(repo.path, b"remotefilelogcache")
    node = bin(node.strip())
    if node == repo.nullid:
        # null revision has no content by definition
        return b'0\0'
    return b'0\0' + _loadfileblob(repo, cachepath, file, node)
308
308
309
309
def getfiles(repo, proto):
    """A server api for requesting particular versions of particular files."""
    if shallowutil.isenabled(repo):
        raise error.Abort(_(b'cannot fetch remote files from shallow repo'))
    if not isinstance(proto, _sshv1server):
        raise error.Abort(_(b'cannot fetch remote files over non-ssh protocol'))

    def streamer():
        fin = proto._fin

        cachepath = repo.ui.config(b"remotefilelog", b"servercachepath")
        if not cachepath:
            cachepath = os.path.join(repo.path, b"remotefilelogcache")

        while True:
            # each request line is <40-hex-node><path>\n
            req = fin.readline()[:-1]
            if not req:
                break

            node = bin(req[:40])
            if node == repo.nullid:
                yield b'0\n'
                continue

            path = req[40:]

            blob = _loadfileblob(repo, cachepath, path, node)

            yield b'%d\n%s' % (len(blob), blob)

            # it would be better to only flush after processing a whole batch
            # but currently we don't know if there are more requests coming
            proto._fout.flush()

    return wireprototypes.streamres(streamer())
345
345
346
346
def createfileblob(filectx):
    """
    format:
        v0:
            str(len(rawtext)) + '\0' + rawtext + ancestortext
        v1:
            'v1' + '\n' + metalist + '\0' + rawtext + ancestortext
            metalist := metalist + '\n' + meta | meta
            meta := sizemeta | flagmeta
            sizemeta := METAKEYSIZE + str(len(rawtext))
            flagmeta := METAKEYFLAG + str(flag)

            note: sizemeta must exist. METAKEYFLAG and METAKEYSIZE must have a
            length of 1.
    """
    filelog = filectx.filelog()
    filerev = filectx.filerev()
    revlogflags = filelog._revlog.flags(filerev)
    if revlogflags == 0:
        # normal files
        text = filectx.data()
    else:
        # lfs, read raw revision data
        text = filelog.rawdata(filerev)

    repo = filectx._repo

    ancestors = [filectx]

    try:
        # skip the slow linkrev adjustment while walking ancestors
        repo.forcelinkrev = True
        ancestors.extend(filectx.ancestors())

        parts = []
        for actx in ancestors:
            parents = actx.parents()
            p1 = parents[0].filenode() if len(parents) > 0 else repo.nullid
            p2 = parents[1].filenode() if len(parents) > 1 else repo.nullid

            rename = actx.renamed()
            copyname = rename[0] if rename else b""
            linknode = actx.node()
            parts.append(
                b"%s%s%s%s%s\0"
                % (actx.filenode(), p1, p2, linknode, copyname)
            )
        ancestortext = b"".join(parts)
    finally:
        repo.forcelinkrev = False

    header = shallowutil.buildfileblobheader(len(text), revlogflags)

    return b"%s\0%s%s" % (header, text, ancestortext)
408
408
409
409
def gcserver(ui, repo):
    """Expire old entries from the server-side remotefilelog cache.

    Keeps blobs reachable from recent heads and removes anything else whose
    mtime is older than the configured expiration window.
    """
    if not repo.ui.configbool(b"remotefilelog", b"server"):
        return

    # collect the cache paths still referenced by recent heads
    heads = repo.revs(b"heads(tip~25000:) - null")
    cachepath = repo.vfs.join(b"remotefilelogcache")
    neededfiles = set()
    for head in heads:
        mf = repo[head].manifest()
        for filename, filenode in pycompat.iteritems(mf):
            neededfiles.add(os.path.join(cachepath, filename, hex(filenode)))

    # delete unneeded older files
    days = repo.ui.configint(b"remotefilelog", b"serverexpiration")
    expiration = time.time() - (days * 24 * 60 * 60)

    progress = ui.makeprogress(_(b"removing old server cache"), unit=b"files")
    progress.update(0)
    for root, _dirs, files in os.walk(cachepath):
        for name in files:
            filepath = os.path.join(root, name)
            progress.increment()
            if filepath in neededfiles:
                continue

            st = os.stat(filepath)
            if st.st_mtime < expiration:
                os.remove(filepath)

    progress.complete()
General Comments 0
You need to be logged in to leave comments. Login now