##// END OF EJS Templates
remotefilelog: use progress helper in remotefilelogserver...
Martin von Zweigbergk -
r40877:fbd053af default
parent child Browse files
Show More
@@ -1,406 +1,404 b''
1 # remotefilelogserver.py - server logic for a remotefilelog server
1 # remotefilelogserver.py - server logic for a remotefilelog server
2 #
2 #
3 # Copyright 2013 Facebook, Inc.
3 # Copyright 2013 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from __future__ import absolute_import
7 from __future__ import absolute_import
8
8
9 import errno
9 import errno
10 import os
10 import os
11 import stat
11 import stat
12 import time
12 import time
13 import zlib
13 import zlib
14
14
15 from mercurial.i18n import _
15 from mercurial.i18n import _
16 from mercurial.node import bin, hex, nullid
16 from mercurial.node import bin, hex, nullid
17 from mercurial import (
17 from mercurial import (
18 changegroup,
18 changegroup,
19 changelog,
19 changelog,
20 context,
20 context,
21 error,
21 error,
22 extensions,
22 extensions,
23 match,
23 match,
24 store,
24 store,
25 streamclone,
25 streamclone,
26 util,
26 util,
27 wireprotoserver,
27 wireprotoserver,
28 wireprototypes,
28 wireprototypes,
29 wireprotov1server,
29 wireprotov1server,
30 )
30 )
31 from . import (
31 from . import (
32 constants,
32 constants,
33 shallowutil,
33 shallowutil,
34 )
34 )
35
35
36 _sshv1server = wireprotoserver.sshv1protocolhandler
36 _sshv1server = wireprotoserver.sshv1protocolhandler
37
37
def setupserver(ui, repo):
    """Sets up a normal Mercurial repo so it can serve files to shallow repos.
    """
    onetimesetup(ui)

    # don't send files to shallow clients during pulls
    def generatefiles(orig, self, changedfiles, linknodes, commonrevs, source,
                      *args, **kwargs):
        # Only filter the file list when the client advertised the
        # remotefilelog bundle2 capability; full clients get everything.
        caps = self._bundlecaps or []
        if constants.BUNDLE2_CAPABLITY in caps:
            # only send files that don't match the specified patterns
            includepattern = None
            excludepattern = None
            for cap in (self._bundlecaps or []):
                if cap.startswith("includepattern="):
                    includepattern = cap[len("includepattern="):].split('\0')
                elif cap.startswith("excludepattern="):
                    excludepattern = cap[len("excludepattern="):].split('\0')

            m = match.always(repo.root, '')
            if includepattern or excludepattern:
                m = match.match(repo.root, '', None,
                                includepattern, excludepattern)

            # keep only files *outside* the shallow pattern; matched files
            # will be fetched on demand by the shallow client instead
            changedfiles = list([f for f in changedfiles if not m(f)])
        return orig(self, changedfiles, linknodes, commonrevs, source,
                    *args, **kwargs)

    extensions.wrapfunction(
        changegroup.cgpacker, 'generatefiles', generatefiles)
68
68
# guard so wireproto commands/wrappers are only registered once per process
onetime = False
def onetimesetup(ui):
    """Configures the wireprotocol for both clients and servers.
    """
    global onetime
    if onetime:
        return
    onetime = True

    # support file content requests
    wireprotov1server.wireprotocommand(
        'x_rfl_getflogheads', 'path', permission='pull')(getflogheads)
    wireprotov1server.wireprotocommand(
        'x_rfl_getfiles', '', permission='pull')(getfiles)
    wireprotov1server.wireprotocommand(
        'x_rfl_getfile', 'file node', permission='pull')(getfile)

    class streamstate(object):
        # mutable per-request state shared between stream_out_shallow and
        # the _walkstreamfiles wrapper below
        match = None
        shallowremote = False
        noflatmf = False
    state = streamstate()

    def stream_out_shallow(repo, proto, other):
        includepattern = None
        excludepattern = None
        raw = other.get('includepattern')
        if raw:
            includepattern = raw.split('\0')
        raw = other.get('excludepattern')
        if raw:
            excludepattern = raw.split('\0')

        oldshallow = state.shallowremote
        oldmatch = state.match
        oldnoflatmf = state.noflatmf
        try:
            state.shallowremote = True
            state.match = match.always(repo.root, '')
            state.noflatmf = other.get('noflatmanifest') == 'True'
            if includepattern or excludepattern:
                state.match = match.match(repo.root, '', None,
                                          includepattern, excludepattern)
            streamres = wireprotov1server.stream(repo, proto)

            # Force the first value to execute, so the file list is computed
            # within the try/finally scope
            first = next(streamres.gen)
            second = next(streamres.gen)
            def gen():
                yield first
                yield second
                for value in streamres.gen:
                    yield value
            return wireprototypes.streamres(gen())
        finally:
            # restore previous state so nested/subsequent requests are clean
            state.shallowremote = oldshallow
            state.match = oldmatch
            state.noflatmf = oldnoflatmf

    wireprotov1server.commands['stream_out_shallow'] = (stream_out_shallow, '*')

    # don't clone filelogs to shallow clients
    def _walkstreamfiles(orig, repo, matcher=None):
        if state.shallowremote:
            # if we are shallow ourselves, stream our local commits
            if shallowutil.isenabled(repo):
                striplen = len(repo.store.path) + 1
                readdir = repo.store.rawvfs.readdir
                visit = [os.path.join(repo.store.path, 'data')]
                while visit:
                    p = visit.pop()
                    for f, kind, st in readdir(p, stat=True):
                        fp = p + '/' + f
                        if kind == stat.S_IFREG:
                            if not fp.endswith('.i') and not fp.endswith('.d'):
                                n = util.pconvert(fp[striplen:])
                                yield (store.decodedir(n), n, st.st_size)
                        if kind == stat.S_IFDIR:
                            visit.append(fp)

            if 'treemanifest' in repo.requirements:
                for (u, e, s) in repo.store.datafiles():
                    if (u.startswith('meta/') and
                        (u.endswith('.i') or u.endswith('.d'))):
                        yield (u, e, s)

            # Return .d and .i files that do not match the shallow pattern
            # (renamed from `match` to avoid shadowing the module import)
            filematcher = state.match
            if filematcher and not filematcher.always():
                for (u, e, s) in repo.store.datafiles():
                    f = u[5:-2]  # trim data/...  and .i/.d
                    if not state.match(f):
                        yield (u, e, s)

            for x in repo.store.topfiles():
                # skip the flat manifest when the client asked for trees only
                if state.noflatmf and x[0][:11] == '00manifest.':
                    continue
                yield x

        elif shallowutil.isenabled(repo):
            # don't allow cloning from a shallow repo to a full repo
            # since it would require fetching every version of every
            # file in order to create the revlogs.
            raise error.Abort(_("Cannot clone from a shallow repo "
                                "to a full repo."))
        else:
            for x in orig(repo, matcher):
                yield x

    extensions.wrapfunction(streamclone, '_walkstreamfiles', _walkstreamfiles)

    # expose remotefilelog capabilities
    def _capabilities(orig, repo, proto):
        caps = orig(repo, proto)
        if (shallowutil.isenabled(repo) or ui.configbool('remotefilelog',
                                                         'server')):
            if isinstance(proto, _sshv1server):
                # legacy getfiles method which only works over ssh
                caps.append(constants.NETWORK_CAP_LEGACY_SSH_GETFILES)
            caps.append('x_rfl_getflogheads')
            caps.append('x_rfl_getfile')
        return caps
    extensions.wrapfunction(wireprotov1server, '_capabilities', _capabilities)

    def _adjustlinkrev(orig, self, *args, **kwargs):
        # When generating file blobs, taking the real path is too slow on large
        # repos, so force it to just return the linkrev directly.
        repo = self._repo
        if util.safehasattr(repo, 'forcelinkrev') and repo.forcelinkrev:
            return self._filelog.linkrev(self._filelog.rev(self._filenode))
        return orig(self, *args, **kwargs)

    extensions.wrapfunction(
        context.basefilectx, '_adjustlinkrev', _adjustlinkrev)

    def _iscmd(orig, cmd):
        # x_rfl_getfiles is a streaming ssh-only command; hide it from the
        # generic HTTP command dispatcher
        if cmd == 'x_rfl_getfiles':
            return False
        return orig(cmd)

    extensions.wrapfunction(wireprotoserver, 'iscmd', _iscmd)
211
211
def _loadfileblob(repo, cachepath, path, node):
    """Return the zlib-compressed file blob for (path, node), using and
    populating the on-disk server cache under ``cachepath``.
    """
    filecachepath = os.path.join(cachepath, path, hex(node))
    if not os.path.exists(filecachepath) or os.path.getsize(filecachepath) == 0:
        filectx = repo.filectx(path, fileid=node)
        if filectx.node() == nullid:
            # the changelog may have moved on since this process loaded it;
            # reload it and retry the lookup
            repo.changelog = changelog.changelog(repo.svfs)
            filectx = repo.filectx(path, fileid=node)

        text = createfileblob(filectx)
        # TODO configurable compression engines
        text = zlib.compress(text)

        # everything should be user & group read/writable
        oldumask = os.umask(0o002)
        try:
            dirname = os.path.dirname(filecachepath)
            if not os.path.exists(dirname):
                try:
                    os.makedirs(dirname)
                except OSError as ex:
                    # another request may have created it concurrently
                    if ex.errno != errno.EEXIST:
                        raise

            f = None
            try:
                f = util.atomictempfile(filecachepath, "wb")
                f.write(text)
            except (IOError, OSError):
                # Don't abort if the user only has permission to read,
                # and not write.
                pass
            finally:
                if f:
                    f.close()
        finally:
            os.umask(oldumask)
    else:
        with open(filecachepath, "rb") as f:
            text = f.read()
    return text
252
252
def getflogheads(repo, proto, path):
    """A server api for requesting a filelog's heads

    Returns the non-null head nodes of the filelog for ``path``, hex-encoded
    and newline-separated.
    """
    flog = repo.file(path)
    heads = flog.heads()
    return '\n'.join((hex(head) for head in heads if head != nullid))
259
259
def getfile(repo, proto, file, node):
    """A server api for requesting a particular version of a file. Can be used
    in batches to request many files at once. The return protocol is:
    <errorcode>\0<data/errormsg> where <errorcode> is 0 for success or
    non-zero for an error.

    data is a compressed blob with revlog flag and ancestors information. See
    createfileblob for its content.
    """
    if shallowutil.isenabled(repo):
        # a shallow server has no complete filelog data to serve from
        return '1\0' + _('cannot fetch remote files from shallow repo')
    cachepath = repo.ui.config("remotefilelog", "servercachepath")
    if not cachepath:
        cachepath = os.path.join(repo.path, "remotefilelogcache")
    node = bin(node.strip())
    if node == nullid:
        # null revision: success with empty payload
        return '0\0'
    return '0\0' + _loadfileblob(repo, cachepath, file, node)
278
278
def getfiles(repo, proto):
    """A server api for requesting particular versions of particular files.

    Legacy ssh-only streaming command: reads newline-terminated
    ``<40-hex-node><path>`` requests from the protocol's input until a blank
    line, and streams back ``<len>\\n<blob>`` responses.
    """
    if shallowutil.isenabled(repo):
        raise error.Abort(_('cannot fetch remote files from shallow repo'))
    if not isinstance(proto, _sshv1server):
        raise error.Abort(_('cannot fetch remote files over non-ssh protocol'))

    def streamer():
        fin = proto._fin

        cachepath = repo.ui.config("remotefilelog", "servercachepath")
        if not cachepath:
            cachepath = os.path.join(repo.path, "remotefilelogcache")

        while True:
            request = fin.readline()[:-1]
            if not request:
                # empty line terminates the request stream
                break

            node = bin(request[:40])
            if node == nullid:
                yield '0\n'
                continue

            path = request[40:]

            text = _loadfileblob(repo, cachepath, path, node)

            yield '%d\n%s' % (len(text), text)

            # it would be better to only flush after processing a whole batch
            # but currently we don't know if there are more requests coming
            proto._fout.flush()
    return wireprototypes.streamres(streamer())
314
314
def createfileblob(filectx):
    """
    format:
        v0:
            str(len(rawtext)) + '\0' + rawtext + ancestortext
        v1:
            'v1' + '\n' + metalist + '\0' + rawtext + ancestortext
            metalist := metalist + '\n' + meta | meta
            meta := sizemeta | flagmeta
            sizemeta := METAKEYSIZE + str(len(rawtext))
            flagmeta := METAKEYFLAG + str(flag)

            note: sizemeta must exist. METAKEYFLAG and METAKEYSIZE must have a
            length of 1.
    """
    flog = filectx.filelog()
    frev = filectx.filerev()
    revlogflags = flog._revlog.flags(frev)
    if revlogflags == 0:
        # normal files
        text = filectx.data()
    else:
        # lfs, read raw revision data
        text = flog.revision(frev, raw=True)

    repo = filectx._repo

    ancestors = [filectx]

    try:
        # take the fast linkrev path while walking ancestors (see the
        # _adjustlinkrev wrapper installed in onetimesetup)
        repo.forcelinkrev = True
        ancestors.extend([f for f in filectx.ancestors()])

        ancestortext = ""
        for ancestorctx in ancestors:
            parents = ancestorctx.parents()
            p1 = nullid
            p2 = nullid
            if len(parents) > 0:
                p1 = parents[0].filenode()
            if len(parents) > 1:
                p2 = parents[1].filenode()

            copyname = ""
            rename = ancestorctx.renamed()
            if rename:
                copyname = rename[0]
            linknode = ancestorctx.node()
            # one fixed-width record per ancestor, NUL-terminated
            ancestortext += "%s%s%s%s%s\0" % (
                ancestorctx.filenode(), p1, p2, linknode,
                copyname)
    finally:
        repo.forcelinkrev = False

    header = shallowutil.buildfileblobheader(len(text), revlogflags)

    return "%s\0%s%s" % (header, text, ancestortext)
372
372
def gcserver(ui, repo):
    """Garbage-collect the server-side remotefilelog cache.

    Keeps blobs reachable from recent heads; removes cached files older
    than the configured ``remotefilelog.serverexpiration`` (in days).
    """
    if not repo.ui.configbool("remotefilelog", "server"):
        return

    neededfiles = set()
    heads = repo.revs("heads(tip~25000:) - null")

    cachepath = repo.vfs.join("remotefilelogcache")
    for head in heads:
        mf = repo[head].manifest()
        for filename, filenode in mf.iteritems():
            filecachepath = os.path.join(cachepath, filename, hex(filenode))
            neededfiles.add(filecachepath)

    # delete unneeded older files
    days = repo.ui.configint("remotefilelog", "serverexpiration")
    expiration = time.time() - (days * 24 * 60 * 60)

    progress = ui.makeprogress(_("removing old server cache"), unit="files")
    progress.update(0)
    for root, dirs, files in os.walk(cachepath):
        for fname in files:
            filepath = os.path.join(root, fname)
            progress.increment()
            if filepath in neededfiles:
                continue

            # renamed from `stat` to avoid shadowing the stat module import
            st = os.stat(filepath)
            if st.st_mtime < expiration:
                os.remove(filepath)
    progress.complete()
General Comments 0
You need to be logged in to leave comments. Login now