Changeset r40538:3b900876 (branch: default)
Author: Augie Fackler
remotefilelogserver: remove pack-serving functionality
(diff of hgext/remotefilelog/remotefilelogserver.py, before/after)
@@ -1,554 +1,417 b''
1 # remotefilelogserver.py - server logic for a remotefilelog server
1 # remotefilelogserver.py - server logic for a remotefilelog server
2 #
2 #
3 # Copyright 2013 Facebook, Inc.
3 # Copyright 2013 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from __future__ import absolute_import
7 from __future__ import absolute_import
8
8
9 import errno
9 import errno
10 import os
10 import os
11 import stat
11 import stat
12 import time
12 import time
13
13
14 from mercurial.i18n import _
14 from mercurial.i18n import _
15 from mercurial.node import bin, hex, nullid, nullrev
15 from mercurial.node import bin, hex, nullid
16 from mercurial import (
16 from mercurial import (
17 ancestor,
18 changegroup,
17 changegroup,
19 changelog,
18 changelog,
20 context,
19 context,
21 error,
20 error,
22 extensions,
21 extensions,
23 match,
22 match,
24 pycompat,
25 store,
23 store,
26 streamclone,
24 streamclone,
27 util,
25 util,
28 wireprotoserver,
26 wireprotoserver,
29 wireprototypes,
27 wireprototypes,
30 wireprotov1server,
28 wireprotov1server,
31 )
29 )
32 from . import (
30 from . import (
33 constants,
34 lz4wrapper,
31 lz4wrapper,
35 shallowrepo,
32 shallowrepo,
36 shallowutil,
33 shallowutil,
37 wirepack,
38 )
34 )
39
35
40 _sshv1server = wireprotoserver.sshv1protocolhandler
36 _sshv1server = wireprotoserver.sshv1protocolhandler
41
37
def setupserver(ui, repo):
    """Sets up a normal Mercurial repo so it can serve files to shallow repos.
    """
    onetimesetup(ui)

    # don't send files to shallow clients during pulls
    def generatefiles(orig, self, changedfiles, linknodes, commonrevs, source,
                      *args, **kwargs):
        caps = self._bundlecaps or []
        if shallowrepo.requirement in caps:
            # The puller is shallow: strip every file matched by the
            # include/exclude patterns it advertised in its bundle caps.
            includepattern = None
            excludepattern = None
            for cap in caps:
                if cap.startswith("includepattern="):
                    includepattern = cap[len("includepattern="):].split('\0')
                elif cap.startswith("excludepattern="):
                    excludepattern = cap[len("excludepattern="):].split('\0')

            if includepattern or excludepattern:
                m = match.match(repo.root, '', None,
                                includepattern, excludepattern)
            else:
                m = match.always(repo.root, '')

            # only send files that don't match the specified patterns
            changedfiles = [f for f in changedfiles if not m(f)]
        return orig(self, changedfiles, linknodes, commonrevs, source,
                    *args, **kwargs)

    extensions.wrapfunction(
        changegroup.cgpacker, 'generatefiles', generatefiles)
72
68
# Guard so the wire-protocol wrapping below happens only once per process.
onetime = False
def onetimesetup(ui):
    """Configures the wireprotocol for both clients and servers.
    """
    global onetime
    if onetime:
        return
    onetime = True

    # support file content requests
    wireprotov1server.wireprotocommand(
        'getflogheads', 'path', permission='pull')(getflogheads)
    wireprotov1server.wireprotocommand(
        'getfiles', '', permission='pull')(getfiles)
    wireprotov1server.wireprotocommand(
        'getfile', 'file node', permission='pull')(getfile)

    # Mutable state shared between stream_out_shallow (which sets it for the
    # duration of one stream request) and the wrapped _walkstreamfiles below.
    class streamstate(object):
        match = None            # matcher for the shallow include/exclude pattern
        shallowremote = False   # True while serving a shallow stream clone
        noflatmf = False        # client asked us to omit the flat 00manifest
    state = streamstate()

    def stream_out_shallow(repo, proto, other):
        includepattern = None
        excludepattern = None
        raw = other.get('includepattern')
        if raw:
            includepattern = raw.split('\0')
        raw = other.get('excludepattern')
        if raw:
            excludepattern = raw.split('\0')

        oldshallow = state.shallowremote
        oldmatch = state.match
        oldnoflatmf = state.noflatmf
        try:
            state.shallowremote = True
            state.match = match.always(repo.root, '')
            state.noflatmf = other.get('noflatmanifest') == 'True'
            if includepattern or excludepattern:
                state.match = match.match(repo.root, '', None,
                                          includepattern, excludepattern)
            streamres = wireprotov1server.stream(repo, proto)

            # Force the first value to execute, so the file list is computed
            # within the try/finally scope
            first = next(streamres.gen)
            second = next(streamres.gen)
            def gen():
                yield first
                yield second
                for value in streamres.gen:
                    yield value
            return wireprototypes.streamres(gen())
        finally:
            state.shallowremote = oldshallow
            state.match = oldmatch
            state.noflatmf = oldnoflatmf

    wireprotov1server.commands['stream_out_shallow'] = (stream_out_shallow, '*')

    # don't clone filelogs to shallow clients
    def _walkstreamfiles(orig, repo):
        if state.shallowremote:
            # if we are shallow ourselves, stream our local commits
            if shallowrepo.requirement in repo.requirements:
                striplen = len(repo.store.path) + 1
                readdir = repo.store.rawvfs.readdir
                visit = [os.path.join(repo.store.path, 'data')]
                while visit:
                    p = visit.pop()
                    for f, kind, st in readdir(p, stat=True):
                        fp = p + '/' + f
                        if kind == stat.S_IFREG:
                            # skip revlog files (.i/.d); stream only the
                            # remotefilelog blobs stored under data/
                            if not fp.endswith('.i') and not fp.endswith('.d'):
                                n = util.pconvert(fp[striplen:])
                                yield (store.decodedir(n), n, st.st_size)
                        if kind == stat.S_IFDIR:
                            visit.append(fp)

            if 'treemanifest' in repo.requirements:
                for (u, e, s) in repo.store.datafiles():
                    if (u.startswith('meta/') and
                        (u.endswith('.i') or u.endswith('.d'))):
                        yield (u, e, s)

            # Return .d and .i files that do not match the shallow pattern
            match = state.match
            if match and not match.always():
                for (u, e, s) in repo.store.datafiles():
                    f = u[5:-2]  # trim data/...  and .i/.d
                    if not state.match(f):
                        yield (u, e, s)

            for x in repo.store.topfiles():
                # omit the flat manifest when the client requested trees only
                if state.noflatmf and x[0][:11] == '00manifest.':
                    continue
                yield x

        elif shallowrepo.requirement in repo.requirements:
            # don't allow cloning from a shallow repo to a full repo
            # since it would require fetching every version of every
            # file in order to create the revlogs.
            raise error.Abort(_("Cannot clone from a shallow repo "
                                "to a full repo."))
        else:
            for x in orig(repo):
                yield x

    extensions.wrapfunction(streamclone, '_walkstreamfiles', _walkstreamfiles)

    # We no longer use getbundle_shallow commands, but we must still
    # support it for migration purposes
    def getbundleshallow(repo, proto, others):
        bundlecaps = others.get('bundlecaps', '')
        bundlecaps = set(bundlecaps.split(','))
        bundlecaps.add('remotefilelog')
        others['bundlecaps'] = ','.join(bundlecaps)

        return wireprotov1server.commands["getbundle"][0](repo, proto, others)

    wireprotov1server.commands["getbundle_shallow"] = (getbundleshallow, '*')

    # expose remotefilelog capabilities
    def _capabilities(orig, repo, proto):
        caps = orig(repo, proto)
        if ((shallowrepo.requirement in repo.requirements or
             ui.configbool('remotefilelog', 'server'))):
            if isinstance(proto, _sshv1server):
                # legacy getfiles method which only works over ssh
                caps.append(shallowrepo.requirement)
            # NOTE(review): diff rendering loses indentation — assuming the
            # next two appends sit outside the ssh-only branch; confirm
            # against upstream remotefilelogserver.py.
            caps.append('getflogheads')
            caps.append('getfile')
        return caps
    extensions.wrapfunction(wireprotov1server, '_capabilities', _capabilities)

    def _adjustlinkrev(orig, self, *args, **kwargs):
        # When generating file blobs, taking the real path is too slow on large
        # repos, so force it to just return the linkrev directly.
        repo = self._repo
        if util.safehasattr(repo, 'forcelinkrev') and repo.forcelinkrev:
            return self._filelog.linkrev(self._filelog.rev(self._filenode))
        return orig(self, *args, **kwargs)

    extensions.wrapfunction(
        context.basefilectx, '_adjustlinkrev', _adjustlinkrev)

    def _iscmd(orig, cmd):
        # 'getfiles' streams its own framing over ssh and must not be
        # treated as a regular http-dispatchable command.
        if cmd == 'getfiles':
            return False
        return orig(cmd)

    extensions.wrapfunction(wireprotoserver, 'iscmd', _iscmd)
229
223
def _loadfileblob(repo, cachepath, path, node):
    """Return the compressed file blob for ``path``@``node``.

    Serves from the on-disk server cache under ``cachepath`` when a non-empty
    cached copy exists; otherwise generates the blob with createfileblob(),
    lz4-compresses it, and writes it back to the cache (best effort — write
    failures are swallowed so read-only users can still be served).
    """
    filecachepath = os.path.join(cachepath, path, hex(node))
    if os.path.exists(filecachepath) and os.path.getsize(filecachepath) != 0:
        # Cache hit: serve the previously generated blob verbatim.
        with open(filecachepath, "r") as f:
            return f.read()

    filectx = repo.filectx(path, fileid=node)
    if filectx.node() == nullid:
        # The changelog may be stale on this repo object; reload and retry.
        repo.changelog = changelog.changelog(repo.svfs)
        filectx = repo.filectx(path, fileid=node)

    text = lz4wrapper.lzcompresshc(createfileblob(filectx))

    # everything should be user & group read/writable
    oldumask = os.umask(0o002)
    try:
        dirname = os.path.dirname(filecachepath)
        if not os.path.exists(dirname):
            try:
                os.makedirs(dirname)
            except OSError as ex:
                if ex.errno != errno.EEXIST:
                    raise

        f = None
        try:
            f = util.atomictempfile(filecachepath, "w")
            f.write(text)
        except (IOError, OSError):
            # Don't abort if the user only has permission to read,
            # and not write.
            pass
        finally:
            if f:
                f.close()
    finally:
        os.umask(oldumask)
    return text
269
263
def getflogheads(repo, proto, path):
    """A server api for requesting a filelog's heads
    """
    heads = repo.file(path).heads()
    return '\n'.join(hex(h) for h in heads if h != nullid)
276
270
def getfile(repo, proto, file, node):
    """A server api for requesting a particular version of a file. Can be used
    in batches to request many files at once. The return protocol is:
    <errorcode>\0<data/errormsg> where <errorcode> is 0 for success or
    non-zero for an error.

    data is a compressed blob with revlog flag and ancestors information. See
    createfileblob for its content.
    """
    if shallowrepo.requirement in repo.requirements:
        # a shallow server has no local file data to hand out
        return '1\0' + _('cannot fetch remote files from shallow repo')
    cachepath = (repo.ui.config("remotefilelog", "servercachepath")
                 or os.path.join(repo.path, "remotefilelogcache"))
    node = bin(node.strip())
    if node == nullid:
        # null revision: success with empty payload
        return '0\0'
    return '0\0' + _loadfileblob(repo, cachepath, file, node)
295
289
def getfiles(repo, proto):
    """A server api for requesting particular versions of particular files.
    """
    if shallowrepo.requirement in repo.requirements:
        raise error.Abort(_('cannot fetch remote files from shallow repo'))
    if not isinstance(proto, _sshv1server):
        raise error.Abort(_('cannot fetch remote files over non-ssh protocol'))

    def streamer():
        fin = proto._fin

        cachepath = (repo.ui.config("remotefilelog", "servercachepath")
                     or os.path.join(repo.path, "remotefilelogcache"))

        while True:
            # each request line is "<40-char hex node><path>\n";
            # an empty line terminates the batch
            line = fin.readline()[:-1]
            if not line:
                break

            node = bin(line[:40])
            if node == nullid:
                yield '0\n'
                continue

            path = line[40:]
            blob = _loadfileblob(repo, cachepath, path, node)
            yield '%d\n%s' % (len(blob), blob)

            # it would be better to only flush after processing a whole batch
            # but currently we don't know if there are more requests coming
            proto._fout.flush()
    return wireprototypes.streamres(streamer())
331
325
def createfileblob(filectx):
    """
    format:
        v0:
            str(len(rawtext)) + '\0' + rawtext + ancestortext
        v1:
            'v1' + '\n' + metalist + '\0' + rawtext + ancestortext
            metalist := metalist + '\n' + meta | meta
            meta := sizemeta | flagmeta
            sizemeta := METAKEYSIZE + str(len(rawtext))
            flagmeta := METAKEYFLAG + str(flag)

    note: sizemeta must exist. METAKEYFLAG and METAKEYSIZE must have a
    length of 1.
    """
    flog = filectx.filelog()
    frev = filectx.filerev()
    revlogflags = flog._revlog.flags(frev)
    if revlogflags == 0:
        # normal files
        text = filectx.data()
    else:
        # lfs, read raw revision data
        text = flog.revision(frev, raw=True)

    repo = filectx._repo

    ancestors = [filectx]

    try:
        # forcelinkrev is honored by the wrapped _adjustlinkrev to return the
        # stored linkrev directly instead of walking changelog history, which
        # is far cheaper while enumerating every file ancestor below.
        repo.forcelinkrev = True
        ancestors.extend([f for f in filectx.ancestors()])

        ancestortext = ""
        for ancestorctx in ancestors:
            parents = ancestorctx.parents()
            p1 = nullid
            p2 = nullid
            if len(parents) > 0:
                p1 = parents[0].filenode()
            if len(parents) > 1:
                p2 = parents[1].filenode()

            copyname = ""
            rename = ancestorctx.renamed()
            if rename:
                copyname = rename[0]
            linknode = ancestorctx.node()
            # One record per ancestor: four 20-byte binary nodes followed by
            # the (possibly empty) copy-source path, NUL-terminated.
            ancestortext += "%s%s%s%s%s\0" % (
                ancestorctx.filenode(), p1, p2, linknode,
                copyname)
    finally:
        repo.forcelinkrev = False

    header = shallowutil.buildfileblobheader(len(text), revlogflags)

    return "%s\0%s%s" % (header, text, ancestortext)
389
383
def gcserver(ui, repo):
    """Garbage-collect the server-side remotefilelog cache.

    Collects the cache paths still reachable from the manifests of recent
    heads (within the last 25000 changesets), then removes every cache file
    that is both unneeded and older than the configured
    ``remotefilelog.serverexpiration`` window (in days).

    No-op unless ``remotefilelog.server`` is enabled.
    """
    if not repo.ui.configbool("remotefilelog", "server"):
        return

    neededfiles = set()
    heads = repo.revs("heads(tip~25000:) - null")

    cachepath = repo.vfs.join("remotefilelogcache")
    for head in heads:
        mf = repo[head].manifest()
        for filename, filenode in mf.iteritems():
            filecachepath = os.path.join(cachepath, filename, hex(filenode))
            neededfiles.add(filecachepath)

    # delete unneeded older files
    days = repo.ui.configint("remotefilelog", "serverexpiration")
    expiration = time.time() - (days * 24 * 60 * 60)

    _removing = _("removing old server cache")
    count = 0
    ui.progress(_removing, count, unit="files")
    for root, dirs, files in os.walk(cachepath):
        # 'entry' instead of 'file': don't shadow the py2 builtin
        for entry in files:
            filepath = os.path.join(root, entry)
            count += 1
            ui.progress(_removing, count, unit="files")
            if filepath in neededfiles:
                continue

            # 'st' instead of 'stat': don't shadow the stat module import
            st = os.stat(filepath)
            if st.st_mtime < expiration:
                os.remove(filepath)

    ui.progress(_removing, None)
424
def getpack(repo, proto, args):
    """A server api for requesting a pack of file information.
    """
    if shallowrepo.requirement in repo.requirements:
        raise error.Abort(_('cannot fetch remote files from shallow repo'))
    if not isinstance(proto, _sshv1server):
        raise error.Abort(_('cannot fetch remote files over non-ssh protocol'))

    def streamer():
        """Request format:

        [<filerequest>,...]\0\0
            filerequest = <filename len: 2 byte><filename><count: 4 byte>
                          [<node: 20 byte>,...]

        Response format:
        [<fileresponse>,...]<10 null bytes>
            fileresponse = <filename len: 2 byte><filename><history><deltas>
            history = <count: 4 byte>[<history entry>,...]
            historyentry = <node: 20 byte><p1: 20 byte><p2: 20 byte>
                           <linknode: 20 byte><copyfrom len: 2 byte><copyfrom>
            deltas = <count: 4 byte>[<delta entry>,...]
            deltaentry = <node: 20 byte><deltabase: 20 byte>
                         <delta len: 8 byte><delta>
        """
        fin = proto._fin
        files = _receivepackrequest(fin)

        # Sort the files by name, so we provide deterministic results
        for filename, nodes in sorted(files.iteritems()):
            fl = repo.file(filename)

            # Compute history
            history = []
            for rev in ancestor.lazyancestors(fl.parentrevs,
                                              [fl.rev(n) for n in nodes],
                                              inclusive=True):
                linkrev = fl.linkrev(rev)
                node = fl.node(rev)
                p1node, p2node = fl.parents(node)
                copyfrom = ''
                linknode = repo.changelog.node(linkrev)
                if p1node == nullid:
                    # A null p1 may indicate a copy; surface the copy source
                    # and report the copied-from node as p1 in the history.
                    copydata = fl.renamed(node)
                    if copydata:
                        copyfrom, copynode = copydata
                        p1node = copynode

                history.append((node, p1node, p2node, linknode, copyfrom))

            # Scan and send deltas
            chain = _getdeltachain(fl, nodes, -1)

            for chunk in wirepack.sendpackpart(filename, history, chain):
                yield chunk

        yield wirepack.closepart()
        proto._fout.flush()

    return wireprototypes.streamres(streamer())
485
def _receivepackrequest(stream):
    """Parse a getpack request from ``stream``.

    The request is a sequence of records, each a 2-byte filename length,
    the filename, a 4-byte node count, then that many 20-byte binary nodes.
    A zero-length filename terminates the sequence.

    Returns a dict mapping filename -> set of binary nodes.
    """
    files = {}
    while True:
        filenamelen = shallowutil.readunpack(stream,
                                             constants.FILENAMESTRUCT)[0]
        if filenamelen == 0:
            break

        filename = shallowutil.readexactly(stream, filenamelen)

        nodecount = shallowutil.readunpack(stream,
                                           constants.PACKREQUESTCOUNTSTRUCT)[0]

        # Read N nodes and split the buffer into fixed-size node strings.
        raw = shallowutil.readexactly(stream, constants.NODESIZE * nodecount)
        files[filename] = set(
            raw[offset:offset + constants.NODESIZE]
            for offset in pycompat.xrange(0, len(raw), constants.NODESIZE))

    return files
507
def _getdeltachain(fl, nodes, stophint):
    """Produces a chain of deltas that includes each of the given nodes.

    `stophint` - The changeset rev number to stop at. If it's set to >= 0, we
    will return not only the deltas for the requested nodes, but also all
    necessary deltas in their delta chains, as long as the deltas have link revs
    >= the stophint. This allows us to return an approximately minimal delta
    chain when the user performs a pull. If `stophint` is set to -1, all nodes
    will return full texts. """
    chain = []

    seen = set()
    for node in nodes:
        startrev = fl.rev(node)
        cur = startrev
        # Walk down each node's delta chain until a chain end, a seen rev,
        # or the stophint boundary; entries are appended deepest-last and
        # reversed at the end so bases precede the deltas built on them.
        while True:
            if cur in seen:
                break
            base = fl._revlog.deltaparent(cur)
            linkrev = fl.linkrev(cur)
            node = fl.node(cur)
            p1, p2 = fl.parentrevs(cur)
            if linkrev < stophint and cur != startrev:
                break

            # Return a full text if:
            # - the caller requested it (via stophint == -1)
            # - the revlog chain has ended (via base==null or base==node)
            # - p1 is null. In some situations this can mean it's a copy, so
            #   we need to use fl.read() to remove the copymetadata.
            if (stophint == -1 or base == nullrev or base == cur
                or p1 == nullrev):
                delta = fl.read(cur)
                base = nullrev
            else:
                delta = fl._chunk(cur)

            basenode = fl.node(base)
            chain.append((node, basenode, delta))
            seen.add(cur)

            if base == nullrev:
                break
            cur = base

    chain.reverse()
    return chain
General Comments 0
You need to be logged in to leave comments. Login now