##// END OF EJS Templates
remotefilelog: drop compat code for "getbundle_shallow" wireprotocol command...
Pulkit Goyal -
r40552:2dd3a020 default
parent child Browse files
Show More
@@ -1,418 +1,406
1 # remotefilelogserver.py - server logic for a remotefilelog server
1 # remotefilelogserver.py - server logic for a remotefilelog server
2 #
2 #
3 # Copyright 2013 Facebook, Inc.
3 # Copyright 2013 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from __future__ import absolute_import
7 from __future__ import absolute_import
8
8
9 import errno
9 import errno
10 import os
10 import os
11 import stat
11 import stat
12 import time
12 import time
13 import zlib
13 import zlib
14
14
15 from mercurial.i18n import _
15 from mercurial.i18n import _
16 from mercurial.node import bin, hex, nullid
16 from mercurial.node import bin, hex, nullid
17 from mercurial import (
17 from mercurial import (
18 changegroup,
18 changegroup,
19 changelog,
19 changelog,
20 context,
20 context,
21 error,
21 error,
22 extensions,
22 extensions,
23 match,
23 match,
24 store,
24 store,
25 streamclone,
25 streamclone,
26 util,
26 util,
27 wireprotoserver,
27 wireprotoserver,
28 wireprototypes,
28 wireprototypes,
29 wireprotov1server,
29 wireprotov1server,
30 )
30 )
31 from . import (
31 from . import (
32 constants,
32 constants,
33 shallowutil,
33 shallowutil,
34 )
34 )
35
35
36 _sshv1server = wireprotoserver.sshv1protocolhandler
36 _sshv1server = wireprotoserver.sshv1protocolhandler
37
37
def setupserver(ui, repo):
    """Sets up a normal Mercurial repo so it can serve files to shallow repos.
    """
    onetimesetup(ui)

    # don't send files to shallow clients during pulls
    def generatefiles(orig, self, changedfiles, linknodes, commonrevs, source,
                      *args, **kwargs):
        # Shallow clients advertise the remotefilelog bundle2 capability;
        # for those we filter out every file the shallow pattern matches.
        bundlecaps = self._bundlecaps or []
        if constants.BUNDLE2_CAPABLITY in bundlecaps:
            # only send files that don't match the specified patterns
            includepattern = None
            excludepattern = None
            for cap in bundlecaps:
                if cap.startswith("includepattern="):
                    includepattern = cap[len("includepattern="):].split('\0')
                elif cap.startswith("excludepattern="):
                    excludepattern = cap[len("excludepattern="):].split('\0')

            matcher = match.always(repo.root, '')
            if includepattern or excludepattern:
                matcher = match.match(repo.root, '', None,
                                      includepattern, excludepattern)

            changedfiles = [f for f in changedfiles if not matcher(f)]
        return orig(self, changedfiles, linknodes, commonrevs, source,
                    *args, **kwargs)

    extensions.wrapfunction(
        changegroup.cgpacker, 'generatefiles', generatefiles)
68
68
onetime = False
def onetimesetup(ui):
    """Configures the wireprotocol for both clients and servers.
    """
    # Guard so that wireprotocol registration/wrapping happens exactly once
    # per process, no matter how many repos are set up.
    global onetime
    if onetime:
        return
    onetime = True

    # support file content requests
    wireprotov1server.wireprotocommand(
        'x_rfl_getflogheads', 'path', permission='pull')(getflogheads)
    wireprotov1server.wireprotocommand(
        'x_rfl_getfiles', '', permission='pull')(getfiles)
    wireprotov1server.wireprotocommand(
        'x_rfl_getfile', 'file node', permission='pull')(getfile)

    # Mutable holder for per-request streaming state, shared between
    # stream_out_shallow and the wrapped _walkstreamfiles below.
    class streamstate(object):
        match = None
        shallowremote = False
        noflatmf = False
    state = streamstate()

    def stream_out_shallow(repo, proto, other):
        """Stream-clone variant for shallow clients; honors include/exclude
        patterns supplied by the client."""
        includepattern = None
        excludepattern = None
        raw = other.get('includepattern')
        if raw:
            includepattern = raw.split('\0')
        raw = other.get('excludepattern')
        if raw:
            excludepattern = raw.split('\0')

        saved = (state.shallowremote, state.match, state.noflatmf)
        try:
            state.shallowremote = True
            state.match = match.always(repo.root, '')
            state.noflatmf = other.get('noflatmanifest') == 'True'
            if includepattern or excludepattern:
                state.match = match.match(repo.root, '', None,
                                          includepattern, excludepattern)
            streamres = wireprotov1server.stream(repo, proto)

            # Force the first value to execute, so the file list is computed
            # within the try/finally scope
            first = next(streamres.gen)
            second = next(streamres.gen)
            def gen():
                yield first
                yield second
                for value in streamres.gen:
                    yield value
            return wireprototypes.streamres(gen())
        finally:
            state.shallowremote, state.match, state.noflatmf = saved

    wireprotov1server.commands['stream_out_shallow'] = (stream_out_shallow, '*')

    # don't clone filelogs to shallow clients
    def _walkstreamfiles(orig, repo, matcher=None):
        if state.shallowremote:
            # if we are shallow ourselves, stream our local commits
            if shallowutil.isenabled(repo):
                striplen = len(repo.store.path) + 1
                readdir = repo.store.rawvfs.readdir
                visit = [os.path.join(repo.store.path, 'data')]
                while visit:
                    p = visit.pop()
                    for f, kind, st in readdir(p, stat=True):
                        fp = p + '/' + f
                        if kind == stat.S_IFREG:
                            # skip revlog files; everything else under data/
                            # is streamed as-is
                            if not fp.endswith('.i') and not fp.endswith('.d'):
                                n = util.pconvert(fp[striplen:])
                                yield (store.decodedir(n), n, st.st_size)
                        if kind == stat.S_IFDIR:
                            visit.append(fp)

            if 'treemanifest' in repo.requirements:
                for (u, e, s) in repo.store.datafiles():
                    if (u.startswith('meta/') and
                        (u.endswith('.i') or u.endswith('.d'))):
                        yield (u, e, s)

            # Return .d and .i files that do not match the shallow pattern
            shallowmatch = state.match
            if shallowmatch and not shallowmatch.always():
                for (u, e, s) in repo.store.datafiles():
                    f = u[5:-2]  # trim data/...  and .i/.d
                    if not shallowmatch(f):
                        yield (u, e, s)

            for x in repo.store.topfiles():
                # optionally omit the flat manifest for treemanifest clients
                if state.noflatmf and x[0][:11] == '00manifest.':
                    continue
                yield x

        elif shallowutil.isenabled(repo):
            # don't allow cloning from a shallow repo to a full repo
            # since it would require fetching every version of every
            # file in order to create the revlogs.
            raise error.Abort(_("Cannot clone from a shallow repo "
                                "to a full repo."))
        else:
            for x in orig(repo, matcher):
                yield x

    extensions.wrapfunction(streamclone, '_walkstreamfiles', _walkstreamfiles)

    # expose remotefilelog capabilities
    def _capabilities(orig, repo, proto):
        caps = orig(repo, proto)
        if (shallowutil.isenabled(repo) or ui.configbool('remotefilelog',
                                                         'server')):
            if isinstance(proto, _sshv1server):
                # legacy getfiles method which only works over ssh
                caps.append(constants.NETWORK_CAP_LEGACY_SSH_GETFILES)
            caps.append('x_rfl_getflogheads')
            caps.append('x_rfl_getfile')
        return caps
    extensions.wrapfunction(wireprotov1server, '_capabilities', _capabilities)

    def _adjustlinkrev(orig, self, *args, **kwargs):
        # When generating file blobs, taking the real path is too slow on large
        # repos, so force it to just return the linkrev directly.
        repo = self._repo
        if util.safehasattr(repo, 'forcelinkrev') and repo.forcelinkrev:
            return self._filelog.linkrev(self._filelog.rev(self._filenode))
        return orig(self, *args, **kwargs)

    extensions.wrapfunction(
        context.basefilectx, '_adjustlinkrev', _adjustlinkrev)

    def _iscmd(orig, cmd):
        # x_rfl_getfiles is ssh-only; hide it from the HTTP command table
        if cmd == 'x_rfl_getfiles':
            return False
        return orig(cmd)

    extensions.wrapfunction(wireprotoserver, 'iscmd', _iscmd)
223
211
def _loadfileblob(repo, cachepath, path, node):
    """Return the zlib-compressed file blob for ``path``@``node``.

    Blobs are cached on disk under ``cachepath``; a hit is served directly,
    a miss (or empty cache file) regenerates the blob and tries to cache it.
    """
    filecachepath = os.path.join(cachepath, path, hex(node))
    if os.path.exists(filecachepath) and os.path.getsize(filecachepath) != 0:
        # cache hit: serve the previously generated blob
        with open(filecachepath, "r") as f:
            return f.read()

    filectx = repo.filectx(path, fileid=node)
    if filectx.node() == nullid:
        # the in-memory changelog may be stale; reload and retry the lookup
        repo.changelog = changelog.changelog(repo.svfs)
        filectx = repo.filectx(path, fileid=node)

    text = createfileblob(filectx)
    # TODO configurable compression engines
    text = zlib.compress(text)

    # everything should be user & group read/writable
    oldumask = os.umask(0o002)
    try:
        dirname = os.path.dirname(filecachepath)
        if not os.path.exists(dirname):
            try:
                os.makedirs(dirname)
            except OSError as ex:
                if ex.errno != errno.EEXIST:
                    raise

        f = None
        try:
            f = util.atomictempfile(filecachepath, "w")
            f.write(text)
        except (IOError, OSError):
            # Don't abort if the user only has permission to read,
            # and not write.
            pass
        finally:
            if f:
                f.close()
    finally:
        os.umask(oldumask)
    return text
264
252
def getflogheads(repo, proto, path):
    """A server api for requesting a filelog's heads
    """
    # newline-separated hex heads, omitting the null node
    flog = repo.file(path)
    return '\n'.join(hex(head) for head in flog.heads() if head != nullid)
271
259
def getfile(repo, proto, file, node):
    """A server api for requesting a particular version of a file. Can be used
    in batches to request many files at once. The return protocol is:
    <errorcode>\0<data/errormsg> where <errorcode> is 0 for success or
    non-zero for an error.

    data is a compressed blob with revlog flag and ancestors information. See
    createfileblob for its content.
    """
    if shallowutil.isenabled(repo):
        return '1\0' + _('cannot fetch remote files from shallow repo')
    # fall back to the default cache location when none is configured
    cachepath = (repo.ui.config("remotefilelog", "servercachepath")
                 or os.path.join(repo.path, "remotefilelogcache"))
    node = bin(node.strip())
    if node == nullid:
        return '0\0'
    return '0\0' + _loadfileblob(repo, cachepath, file, node)
290
278
def getfiles(repo, proto):
    """A server api for requesting particular versions of particular files.
    """
    if shallowutil.isenabled(repo):
        raise error.Abort(_('cannot fetch remote files from shallow repo'))
    if not isinstance(proto, _sshv1server):
        raise error.Abort(_('cannot fetch remote files over non-ssh protocol'))

    def streamer():
        """Read '<40-hex-node><path>\\n' requests off the ssh pipe until a
        blank line, yielding '<len>\\n<blob>' responses."""
        fin = proto._fin

        # fall back to the default cache location when none is configured
        cachepath = (repo.ui.config("remotefilelog", "servercachepath")
                     or os.path.join(repo.path, "remotefilelogcache"))

        while True:
            line = fin.readline()[:-1]
            if not line:
                # empty request terminates the session
                break

            node = bin(line[:40])
            if node == nullid:
                yield '0\n'
                continue

            path = line[40:]

            blob = _loadfileblob(repo, cachepath, path, node)

            yield '%d\n%s' % (len(blob), blob)

            # it would be better to only flush after processing a whole batch
            # but currently we don't know if there are more requests coming
            proto._fout.flush()
    return wireprototypes.streamres(streamer())
326
314
def createfileblob(filectx):
    """
    format:
        v0:
            str(len(rawtext)) + '\0' + rawtext + ancestortext
        v1:
            'v1' + '\n' + metalist + '\0' + rawtext + ancestortext
            metalist := metalist + '\n' + meta | meta
            meta := sizemeta | flagmeta
            sizemeta := METAKEYSIZE + str(len(rawtext))
            flagmeta := METAKEYFLAG + str(flag)

    note: sizemeta must exist. METAKEYFLAG and METAKEYSIZE must have a
    length of 1.
    """
    flog = filectx.filelog()
    frev = filectx.filerev()
    revlogflags = flog._revlog.flags(frev)
    if revlogflags == 0:
        # normal files
        text = filectx.data()
    else:
        # lfs, read raw revision data
        text = flog.revision(frev, raw=True)

    repo = filectx._repo

    ancestors = [filectx]

    try:
        # forcelinkrev makes _adjustlinkrev (wrapped in onetimesetup) skip
        # the expensive real-path walk while we enumerate ancestors
        repo.forcelinkrev = True
        ancestors.extend(filectx.ancestors())

        ancestortext = ""
        for ancestorctx in ancestors:
            parents = ancestorctx.parents()
            p1 = p2 = nullid
            if parents:
                p1 = parents[0].filenode()
            if len(parents) > 1:
                p2 = parents[1].filenode()

            copyname = ""
            rename = ancestorctx.renamed()
            if rename:
                copyname = rename[0]
            linknode = ancestorctx.node()
            # fixed-width binary record per ancestor, NUL-terminated
            ancestortext += "%s%s%s%s%s\0" % (
                ancestorctx.filenode(), p1, p2, linknode,
                copyname)
    finally:
        repo.forcelinkrev = False

    header = shallowutil.buildfileblobheader(len(text), revlogflags)

    return "%s\0%s%s" % (header, text, ancestortext)
384
372
def gcserver(ui, repo):
    """Garbage-collect the server-side remotefilelog blob cache.

    Keeps every blob reachable from recent heads (tip~25000: onward) and
    removes any other cache file older than the configured
    ``remotefilelog.serverexpiration`` age (in days). No-op unless the
    repo is configured as a remotefilelog server.
    """
    if not repo.ui.configbool("remotefilelog", "server"):
        return

    # collect the cache paths of every file version reachable from the heads
    neededfiles = set()
    heads = repo.revs("heads(tip~25000:) - null")

    cachepath = repo.vfs.join("remotefilelogcache")
    for head in heads:
        mf = repo[head].manifest()
        for filename, filenode in mf.iteritems():
            filecachepath = os.path.join(cachepath, filename, hex(filenode))
            neededfiles.add(filecachepath)

    # delete unneeded older files
    days = repo.ui.configint("remotefilelog", "serverexpiration")
    expiration = time.time() - (days * 24 * 60 * 60)

    _removing = _("removing old server cache")
    count = 0
    ui.progress(_removing, count, unit="files")
    for root, dirs, files in os.walk(cachepath):
        # 'fname' (not 'file') avoids shadowing the py2 builtin
        for fname in files:
            filepath = os.path.join(root, fname)
            count += 1
            ui.progress(_removing, count, unit="files")
            if filepath in neededfiles:
                continue

            # 'st' (not 'stat') avoids shadowing the stdlib 'stat' module
            # imported at the top of this file
            st = os.stat(filepath)
            if st.st_mtime < expiration:
                os.remove(filepath)

    ui.progress(_removing, None)
General Comments 0
You need to be logged in to leave comments. Login now