progress: make the progress helper a context manager...
Martin von Zweigbergk
r38393:800f5a2c default
@@ -1,1639 +1,1645 @@
# scmutil.py - Mercurial core utility functions
#
# Copyright Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import glob
import hashlib
import os
import re
import socket
import subprocess
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    short,
    wdirid,
    wdirrev,
)

from . import (
    encoding,
    error,
    match as matchmod,
    obsolete,
    obsutil,
    pathutil,
    phases,
    pycompat,
    revsetlang,
    similar,
    url,
    util,
    vfs,
)

from .utils import (
    procutil,
    stringutil,
)

if pycompat.iswindows:
    from . import scmwindows as scmplatform
else:
    from . import scmposix as scmplatform

termsize = scmplatform.termsize

class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
                                   ignored, clean))

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        return ((r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
                 r'unknown=%s, ignored=%s, clean=%s>') %
                tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self))

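# Usage sketch (illustrative, not from the original file). A status object is
# built from seven file lists (the names below are made up) and can be read
# either by property name or by tuple index:
#
#   >>> st = status([b'changed.c'], [], [], [], [], [], [b'clean.c'])
#   >>> st.modified
#   [b'changed.c']
#   >>> st[6] == st.clean
#   True
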
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)

def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    if excluded:
        for n in excluded:
            ctx = repo[n]
            if ctx.phase() >= phases.secret and not ctx.extinct():
                secretlist.append(n)

    if secretlist:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
    else:
        ui.status(_("no changes found\n"))

def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.
    """
    try:
        try:
            return func()
        except: # re-raises
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        if inst.errno == errno.ETIMEDOUT:
            reason = _('timed out waiting for lock held by %r') % inst.locker
        else:
            reason = _('lock held by %r') % inst.locker
        ui.warn(_("abort: %s: %s\n")
                % (inst.desc or stringutil.forcebytestr(inst.filename), reason))
        if not inst.locker:
            ui.warn(_("(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        ui.warn(_("abort: could not lock %s: %s\n") %
                (inst.desc or stringutil.forcebytestr(inst.filename),
                 encoding.strtolocal(inst.strerror)))
    except error.OutOfBandError as inst:
        if inst.args:
            msg = _("abort: remote error:\n")
        else:
            msg = _("abort: remote error\n")
        ui.warn(msg)
        if inst.args:
            ui.warn(''.join(inst.args))
        if inst.hint:
            ui.warn('(%s)\n' % inst.hint)
    except error.RepoError as inst:
        ui.warn(_("abort: %s!\n") % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.warn(_("abort: %s") % inst.args[0])
        msg = inst.args[1]
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
        if not isinstance(msg, bytes):
            ui.warn(" %r\n" % (msg,))
        elif not msg:
            ui.warn(_(" empty string\n"))
        else:
            ui.warn("\n%r\n" % stringutil.ellipsis(msg))
    except error.CensoredNodeError as inst:
        ui.warn(_("abort: file censored %s!\n") % inst)
    except error.RevlogError as inst:
        ui.warn(_("abort: %s!\n") % inst)
    except error.InterventionRequired as inst:
        ui.warn("%s\n" % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
        return 1
    except error.WdirUnsupported:
        ui.warn(_("abort: working directory revision cannot be specified\n"))
    except error.Abort as inst:
        ui.warn(_("abort: %s\n") % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
    except ImportError as inst:
        ui.warn(_("abort: %s!\n") % stringutil.forcebytestr(inst))
        m = stringutil.forcebytestr(inst).split()[-1]
        if m in "mpatch bdiff".split():
            ui.warn(_("(did you forget to compile extensions?)\n"))
        elif m in "zlib".split():
            ui.warn(_("(is your Python install correct?)\n"))
    except IOError as inst:
        if util.safehasattr(inst, "code"):
            ui.warn(_("abort: %s\n") % stringutil.forcebytestr(inst))
        elif util.safehasattr(inst, "reason"):
            try: # usually it is in the form (errno, strerror)
                reason = inst.reason.args[1]
            except (AttributeError, IndexError):
                # it might be anything, for example a string
                reason = inst.reason
            if isinstance(reason, pycompat.unicode):
                # SSLError of Python 2.7.9 contains a unicode
                reason = encoding.unitolocal(reason)
            ui.warn(_("abort: error: %s\n") % reason)
        elif (util.safehasattr(inst, "args")
              and inst.args and inst.args[0] == errno.EPIPE):
            pass
        elif getattr(inst, "strerror", None):
            if getattr(inst, "filename", None):
                ui.warn(_("abort: %s: %s\n") % (
                    encoding.strtolocal(inst.strerror),
                    stringutil.forcebytestr(inst.filename)))
            else:
                ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
        else:
            raise
    except OSError as inst:
        if getattr(inst, "filename", None) is not None:
            ui.warn(_("abort: %s: '%s'\n") % (
                encoding.strtolocal(inst.strerror),
                stringutil.forcebytestr(inst.filename)))
        else:
            ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
    except MemoryError:
        ui.warn(_("abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case catch this and pass exit code to caller.
        return inst.code
    except socket.error as inst:
        ui.warn(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))

    return -1

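# Usage sketch (illustrative, not from the original file; assumes an existing
# 'ui' object). callcatch() runs an arbitrary callable and converts known
# exceptions into a warning plus an exit code; 'runcommand' is hypothetical:
#
#   >>> def runcommand():
#   ...     raise error.Abort(b'no repository found')
#   >>> callcatch(ui, runcommand)   # prints "abort: no repository found"
#   -1
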
def checknewlabel(repo, lbl, kind):
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ['tip', '.', 'null']:
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for c in (':', '\0', '\n', '\r'):
        if c in lbl:
            raise error.Abort(
                _("%r cannot be used in a name") % pycompat.bytestr(c))
    try:
        int(lbl)
        raise error.Abort(_("cannot use an integer as a name"))
    except ValueError:
        pass
    if lbl.strip() != lbl:
        raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)

def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if '\r' in f or '\n' in f:
        raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
                          % pycompat.bytestr(f))

def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if abort or warn:
        msg = util.checkwinfilename(f)
        if msg:
            msg = "%s: %s" % (msg, procutil.shellquote(f))
            if abort:
                raise error.Abort(msg)
            ui.warn(_("warning: %s\n") % msg)

def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames')
    lval = val.lower()
    bval = stringutil.parsebool(val)
    abort = pycompat.iswindows or lval == 'abort'
    warn = bval or lval == 'warn'
    if bval is None and not (warn or abort or lval == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn

class casecollisionauditor(object):
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)

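# Usage sketch (illustrative, not from the original file; assumes live 'ui'
# and 'repo' objects). The auditor is called once per filename about to be
# added; with abort=False it warns instead of raising on a collision:
#
#   >>> audit = casecollisionauditor(ui, False, repo.dirstate)
#   >>> audit(b'README.txt')   # records the case-folded name
#   >>> audit(b'readme.txt')   # warns: collides with 'README.txt' above
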
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    key = None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if revs:
        s = hashlib.sha1()
        for rev in revs:
            s.update('%d;' % rev)
        key = s.digest()
    return key

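# Usage sketch (illustrative, not from the original file; assumes a 'repo').
# Cache code typically stores this key alongside tiprev/tipnode and compares
# it against a freshly computed value when loading:
#
#   >>> key = filteredhash(repo, len(repo.changelog) - 1)
#   >>> key is None or len(key) == 20   # None when nothing is filtered
#   True
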
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            dirstat = os.stat(dirname)
            match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs

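# Usage sketch (illustrative, not from the original file; the path below is
# hypothetical): enumerate every repository under a directory, following
# symlinks while avoiding cycles via the seen_dirs bookkeeping above:
#
#   >>> for root in walkrepos(b'/srv/repos', followsym=True):
#   ...     print(root)
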
def binnode(ctx):
    """Return binary node id for a given basectx"""
    node = ctx.node()
    if node is None:
        return wdirid
    return node

def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    rev = ctx.rev()
    if rev is None:
        return wdirrev
    return rev

def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    repo = ctx.repo()
    return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))

def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    if ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    return '%d:%s' % (rev, hexfunc(node))

def resolvehexnodeidprefix(repo, prefix):
    # Uses unfiltered repo because it's faster when prefix is ambiguous.
    # This matches the shortesthexnodeidprefix() function below.
    node = repo.unfiltered().changelog._partialmatch(prefix)
    if node is None:
        return
    repo.changelog.rev(node) # make sure node isn't filtered
    return node

def shortesthexnodeidprefix(repo, node, minlength=1):
    """Find the shortest unambiguous prefix that matches hexnode."""
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.
    cl = repo.unfiltered().changelog

    def isrev(prefix):
        try:
            i = int(prefix)
            # if we are a pure int, then starting with zero will not be
            # confused as a rev; or, obviously, if the int is larger
            # than the value of the tip rev
            if prefix[0] == '0' or i > len(cl):
                return False
            return True
        except ValueError:
            return False

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not isrev(prefix):
                return prefix

    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()

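# Usage sketch (illustrative, not from the original file; assumes a 'repo').
# The two helpers above are near-inverses: shorten a full node to an
# unambiguous prefix, then resolve the prefix back to the same node:
#
#   >>> node = repo['tip'].node()
#   >>> prefix = shortesthexnodeidprefix(repo, node, minlength=4)
#   >>> resolvehexnodeidprefix(repo, prefix) == node
#   True
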
def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.LookupError if the symbol is an
    ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
        return True
    except error.RepoLookupError:
        return False

def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = ("symbol (%s of type %s) was not a string, did you mean "
               "repo[symbol]?" % (symbol, type(symbol)))
        raise error.ProgrammingError(msg)
    try:
        if symbol in ('.', 'tip', 'null'):
            return repo[symbol]

        try:
            r = int(symbol)
            if '%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        if len(symbol) == 40:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_("unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        return repo[None]
    except (error.FilteredIndexError, error.FilteredLookupError,
            error.FilteredRepoLookupError):
        raise _filterederror(repo, symbol)

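# Usage sketch (illustrative, not from the original file; assumes a 'repo').
# revsymbol() accepts plain symbols only; revset expressions belong in
# revrange()/revsingle() below:
#
#   >>> ctx = revsymbol(repo, b'tip')    # ok: a single symbol
#   >>> isrevsymbol(repo, b'no-such-rev')
#   False
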
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith('visible'):

        # Check if the changeset is obsolete
        unfilteredrepo = repo.unfiltered()
        ctx = revsymbol(unfilteredrepo, changeid)

        # If the changeset is obsolete, enrich the message with the reason
        # that made this changeset not visible
        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, changeid, ctx)
        else:
            msg = _("hidden revision '%s'") % changeid

        hint = _('use --hidden to access hidden revisions')

        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _("filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)

def revsingle(repo, revspec, default='.', localalias=None):
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec], localalias=localalias)
    if not l:
        raise error.Abort(_('empty revision set'))
    return repo[l.last()]

def _pairspec(revspec):
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')

def revpair(repo, revs):
    if not revs:
        return repo['.'], repo[None]

    l = revrange(repo, revs)

    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]

def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    allspecs = []
    for spec in specs:
        if isinstance(spec, int):
            spec = revsetlang.formatspec('rev(%d)', spec)
        allspecs.append(spec)
    return repo.anyrevs(allspecs, user=True, localalias=localalias)

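# Usage sketch (illustrative, not from the original file; assumes a 'repo').
# Per the docstring above, arguments are interpolated with formatspec()
# first, then the formatted specs are OR-ed together by revrange():
#
#   >>> spec = revsetlang.formatspec('ancestors(%s)', 'tip')
#   >>> revs = revrange(repo, [spec, 'draft()'])
#   >>> 0 in revs   # the result is a smartset supporting containment tests
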
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        return [parents[0], repo['null']]
    if parents[0].rev() >= intrev(ctx) - 1:
        return []
    return parents

def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            try:
                globbed = glob.glob(pat)
            except re.error:
                globbed = [pat]
            if globbed:
                ret.extend(globbed)
                continue
        ret.append(kindpat)
    return ret

def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    def bad(f, msg):
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        pats = []
    return m, pats

def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]

def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always(repo.root, repo.getcwd())

def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)

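# Usage sketch (illustrative, not from the original file; assumes a 'repo'
# and made-up patterns): build a matcher for the working copy and test a
# path against it; matchers are callable on repo-relative file names:
#
#   >>> m = match(repo[None], pats=[b'glob:*.py'])
#   >>> m(b'setup.py')
#   True
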
def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    if not matchmod.patkind(pat):
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    else:
        ctx = repo[rev]
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
        files = [f for f in ctx if m(f)]
        if len(files) != 1:
            raise error.ParseError(msg)
        return files[0]

def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified
    '''
    origbackuppath = ui.config('ui', 'origbackuppath')
    if not origbackuppath:
        return filepath + ".orig"

    # Convert filepath from an absolute path into a path inside the repo.
    filepathfromroot = util.normpath(os.path.relpath(filepath,
                                                     start=repo.root))

    origvfs = vfs.vfs(repo.wjoin(origbackuppath))
    origbackupdir = origvfs.dirname(filepathfromroot)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(util.finddirs(filepathfromroot))):
            if origvfs.isfileorlink(f):
                ui.note(_('removing conflicting file: %s\n')
                        % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
        ui.note(_('removing conflicting directory: %s\n')
                % origvfs.join(filepathfromroot))
        origvfs.rmtree(filepathfromroot, forcibly=True)

    return origvfs.join(filepathfromroot)

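# Configuration sketch (illustrative, not from the original file). With the
# following in a repository's hgrc, .orig backups land under a dedicated
# directory instead of next to the original file:
#
#   [ui]
#   origbackuppath = .hg/origbackups
#
#   >>> origpath(ui, repo, b'foo.txt')   # -> path under .hg/origbackups
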
class _containsnode(object):
    """proxy __contains__(node) to container.__contains__ which accepts revs"""

    def __init__(self, repo, revcontainer):
        self._torev = repo.changelog.rev
        self._revcontains = revcontainer.__contains__

    def __contains__(self, node):
        return self._revcontains(self._torev(node))

def cleanupnodes(repo, replacements, operation, moves=None, metadata=None):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is a dictionary containing metadata to be stored in obsmarker if
    obsolescence is enabled.
    """
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, 'items'):
        replacements = {n: () for n in replacements}

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()
    for oldnode, newnodes in replacements.items():
        if oldnode in moves:
            continue
        if len(newnodes) > 1:
            # usually a split, take the one with biggest rev number
            newnode = next(unfi.set('max(%ln)', newnodes)).node()
        elif len(newnodes) == 0:
            # move bookmark backwards
            roots = list(unfi.set('max((::%n) - %ln)', oldnode,
                                  list(replacements)))
            if roots:
                newnode = roots[0].node()
            else:
                newnode = nullid
        else:
            newnode = newnodes[0]
        moves[oldnode] = newnode

    with repo.transaction('cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        allnewnodes = [n for ns in replacements.values() for n in ns]
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks # avoid import cycle
            repo.ui.debug('moving bookmarks %r from %s to %s\n' %
                          (util.rapply(pycompat.maybebytestr, oldbmarks),
                           hex(oldnode), hex(newnode)))
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
                                   allnewnodes, newnode, oldnode)
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obsolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the nodes in topological order, that might be useful
            # for some obsstore logic.
            # NOTE: the filtering and sorting might belong to createmarkers.
            isobs = unfi.obsstore.successors.__contains__
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0])
            rels = [(unfi[n], tuple(unfi[m] for m in s))
                    for n, s in sorted(replacements.items(), key=sortfunc)
                    if s or not isobs(n)]
            if rels:
                obsolete.createmarkers(repo, rels, operation=operation,
                                       metadata=metadata)
        else:
            from . import repair # avoid import cycle
            tostrip = list(replacements)
            if tostrip:
                repair.delayedstrip(repo.ui, repo, tostrip, operation)

876 def addremove(repo, matcher, prefix, opts=None):
877     if opts is None:
878         opts = {}
879     m = matcher
880     dry_run = opts.get('dry_run')
881     try:
882         similarity = float(opts.get('similarity') or 0)
883     except ValueError:
884         raise error.Abort(_('similarity must be a number'))
885     if similarity < 0 or similarity > 100:
886         raise error.Abort(_('similarity must be between 0 and 100'))
887     similarity /= 100.0
888
889     ret = 0
890     join = lambda f: os.path.join(prefix, f)
891
892     wctx = repo[None]
893     for subpath in sorted(wctx.substate):
894         submatch = matchmod.subdirmatcher(subpath, m)
895         if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
896             sub = wctx.sub(subpath)
897             try:
898                 if sub.addremove(submatch, prefix, opts):
899                     ret = 1
900             except error.LookupError:
901                 repo.ui.status(_("skipping missing subrepository: %s\n")
902                                % join(subpath))
903
904     rejected = []
905     def badfn(f, msg):
906         if f in m.files():
907             m.bad(f, msg)
908         rejected.append(f)
909
910     badmatch = matchmod.badmatch(m, badfn)
911     added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
912                                                                     badmatch)
913
914     unknownset = set(unknown + forgotten)
915     toprint = unknownset.copy()
916     toprint.update(deleted)
917     for abs in sorted(toprint):
918         if repo.ui.verbose or not m.exact(abs):
919             if abs in unknownset:
920                 status = _('adding %s\n') % m.uipath(abs)
921             else:
922                 status = _('removing %s\n') % m.uipath(abs)
923             repo.ui.status(status)
924
925     renames = _findrenames(repo, m, added + unknown, removed + deleted,
926                            similarity)
927
928     if not dry_run:
929         _markchanges(repo, unknown + forgotten, deleted, renames)
930
931     for f in rejected:
932         if f in m.files():
933             return 1
934     return ret
935
936 def marktouched(repo, files, similarity=0.0):
937     '''Assert that files have somehow been operated upon. files are relative to
938     the repo root.'''
939     m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
940     rejected = []
941
942     added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
943
944     if repo.ui.verbose:
945         unknownset = set(unknown + forgotten)
946         toprint = unknownset.copy()
947         toprint.update(deleted)
948         for abs in sorted(toprint):
949             if abs in unknownset:
950                 status = _('adding %s\n') % abs
951             else:
952                 status = _('removing %s\n') % abs
953             repo.ui.status(status)
954
955     renames = _findrenames(repo, m, added + unknown, removed + deleted,
956                            similarity)
957
958     _markchanges(repo, unknown + forgotten, deleted, renames)
959
960     for f in rejected:
961         if f in m.files():
962             return 1
963     return 0
964
965 def _interestingfiles(repo, matcher):
966     '''Walk dirstate with matcher, looking for files that addremove would care
967     about.
968
969     This is different from dirstate.status because it doesn't care about
970     whether files are modified or clean.'''
971     added, unknown, deleted, removed, forgotten = [], [], [], [], []
972     audit_path = pathutil.pathauditor(repo.root, cached=True)
973
974     ctx = repo[None]
975     dirstate = repo.dirstate
976     walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
977                                 unknown=True, ignored=False, full=False)
978     for abs, st in walkresults.iteritems():
979         dstate = dirstate[abs]
980         if dstate == '?' and audit_path.check(abs):
981             unknown.append(abs)
982         elif dstate != 'r' and not st:
983             deleted.append(abs)
984         elif dstate == 'r' and st:
985             forgotten.append(abs)
986         # for finding renames
987         elif dstate == 'r' and not st:
988             removed.append(abs)
989         elif dstate == 'a':
990             added.append(abs)
991
992     return added, unknown, deleted, removed, forgotten
993
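The single-letter states tested in the walk above are dirstate codes: '?' is an untracked file, 'r' is marked for removal, 'a' is marked for addition ('n' and 'm' are the normal and merged states, which this helper ignores). A condensed restatement of the classification, using a hypothetical classify helper in place of the list-appending branches:

    def classify(dstate, st):
        # dstate: dirstate letter for the path; st: stat result,
        # falsy when the file is missing from disk
        if dstate == '?':
            return 'unknown'    # on disk but untracked
        elif dstate != 'r' and not st:
            return 'deleted'    # tracked but vanished from disk
        elif dstate == 'r' and st:
            return 'forgotten'  # marked removed yet still on disk
        elif dstate == 'r' and not st:
            return 'removed'    # removed and gone: a rename candidate
        elif dstate == 'a':
            return 'added'
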
994 def _findrenames(repo, matcher, added, removed, similarity):
995     '''Find renames from removed files to added ones.'''
996     renames = {}
997     if similarity > 0:
998         for old, new, score in similar.findrenames(repo, added, removed,
999                                                     similarity):
1000             if (repo.ui.verbose or not matcher.exact(old)
1001                 or not matcher.exact(new)):
1002                 repo.ui.status(_('recording removal of %s as rename to %s '
1003                                  '(%d%% similar)\n') %
1004                                (matcher.rel(old), matcher.rel(new),
1005                                 score * 100))
1006             renames[new] = old
1007     return renames
1008
1009 def _markchanges(repo, unknown, deleted, renames):
1010     '''Marks the files in unknown as added, the files in deleted as removed,
1011     and the files in renames as copied.'''
1012     wctx = repo[None]
1013     with repo.wlock():
1014         wctx.forget(deleted)
1015         wctx.add(unknown)
1016         for new, old in renames.iteritems():
1017             wctx.copy(old, new)
1018
1019 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1020     """Update the dirstate to reflect the intent of copying src to dst. For
1021     different reasons it might not end up with dst marked as copied from src.
1022     """
1023     origsrc = repo.dirstate.copied(src) or src
1024     if dst == origsrc: # copying back a copy?
1025         if repo.dirstate[dst] not in 'mn' and not dryrun:
1026             repo.dirstate.normallookup(dst)
1027     else:
1028         if repo.dirstate[origsrc] == 'a' and origsrc == src:
1029             if not ui.quiet:
1030                 ui.warn(_("%s has not been committed yet, so no copy "
1031                           "data will be stored for %s.\n")
1032                         % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
1033             if repo.dirstate[dst] in '?r' and not dryrun:
1034                 wctx.add([dst])
1035         elif not dryrun:
1036             wctx.copy(origsrc, dst)
1037
1038 def readrequires(opener, supported):
1039     '''Reads and parses .hg/requires and checks if all entries found
1040     are in the list of supported features.'''
1041     requirements = set(opener.read("requires").splitlines())
1042     missings = []
1043     for r in requirements:
1044         if r not in supported:
1045             if not r or not r[0:1].isalnum():
1046                 raise error.RequirementError(_(".hg/requires file is corrupt"))
1047             missings.append(r)
1048     missings.sort()
1049     if missings:
1050         raise error.RequirementError(
1051             _("repository requires features unknown to this Mercurial: %s")
1052             % " ".join(missings),
1053             hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
1054                    " for more information"))
1055     return requirements
1056
1057 def writerequires(opener, requirements):
1058     with opener('requires', 'w') as fp:
1059         for r in sorted(requirements):
1060             fp.write("%s\n" % r)
1061
1062 class filecachesubentry(object):
1063     def __init__(self, path, stat):
1064         self.path = path
1065         self.cachestat = None
1066         self._cacheable = None
1067
1068         if stat:
1069             self.cachestat = filecachesubentry.stat(self.path)
1070
1071             if self.cachestat:
1072                 self._cacheable = self.cachestat.cacheable()
1073             else:
1074                 # None means we don't know yet
1075                 self._cacheable = None
1076
1077     def refresh(self):
1078         if self.cacheable():
1079             self.cachestat = filecachesubentry.stat(self.path)
1080
1081     def cacheable(self):
1082         if self._cacheable is not None:
1083             return self._cacheable
1084
1085         # we don't know yet, assume it is for now
1086         return True
1087
1088     def changed(self):
1089         # no point in going further if we can't cache it
1090         if not self.cacheable():
1091             return True
1092
1093         newstat = filecachesubentry.stat(self.path)
1094
1095         # we may not know if it's cacheable yet, check again now
1096         if newstat and self._cacheable is None:
1097             self._cacheable = newstat.cacheable()
1098
1099             # check again
1100             if not self._cacheable:
1101                 return True
1102
1103         if self.cachestat != newstat:
1104             self.cachestat = newstat
1105             return True
1106         else:
1107             return False
1108
1109     @staticmethod
1110     def stat(path):
1111         try:
1112             return util.cachestat(path)
1113         except OSError as e:
1114             if e.errno != errno.ENOENT:
1115                 raise
1116
1117 class filecacheentry(object):
1118     def __init__(self, paths, stat=True):
1119         self._entries = []
1120         for path in paths:
1121             self._entries.append(filecachesubentry(path, stat))
1122
1123     def changed(self):
1124         '''true if any entry has changed'''
1125         for entry in self._entries:
1126             if entry.changed():
1127                 return True
1128         return False
1129
1130     def refresh(self):
1131         for entry in self._entries:
1132             entry.refresh()
1133
1134 class filecache(object):
1135     '''A property-like decorator that tracks files under .hg/ for updates.
1136
1137     Records stat info when called in _filecache.
1138
1139     On subsequent calls, compares old stat info with new info, and recreates the
1140     object when any of the files changes, updating the new stat info in
1141     _filecache.
1142
1143     Mercurial either atomically renames or appends to files under .hg,
1144     so to ensure the cache is reliable we need the filesystem to be able
1145     to tell us if a file has been replaced. If it can't, we fall back to
1146     recreating the object on every call (essentially the same behavior as
1147     propertycache).
1148
1149     '''
1150     def __init__(self, *paths):
1151         self.paths = paths
1152
1153     def join(self, obj, fname):
1154         """Used to compute the runtime path of a cached file.
1155
1156         Users should subclass filecache and provide their own version of this
1157         function to call the appropriate join function on 'obj' (an instance
1158         of the class whose member function was decorated).
1159         """
1160         raise NotImplementedError
1161
1162     def __call__(self, func):
1163         self.func = func
1164         self.sname = func.__name__
1165         self.name = pycompat.sysbytes(self.sname)
1166         return self
1167
1168     def __get__(self, obj, type=None):
1169         # if accessed on the class, return the descriptor itself.
1170         if obj is None:
1171             return self
1172         # do we need to check if the file changed?
1173         if self.sname in obj.__dict__:
1174             assert self.name in obj._filecache, self.name
1175             return obj.__dict__[self.sname]
1176
1177         entry = obj._filecache.get(self.name)
1178
1179         if entry:
1180             if entry.changed():
1181                 entry.obj = self.func(obj)
1182         else:
1183             paths = [self.join(obj, path) for path in self.paths]
1184
1185             # We stat -before- creating the object so our cache doesn't lie if
1186             # a writer modifies it between the time we read and stat
1187             entry = filecacheentry(paths, True)
1188             entry.obj = self.func(obj)
1189
1190             obj._filecache[self.name] = entry
1191
1192         obj.__dict__[self.sname] = entry.obj
1193         return entry.obj
1194
1195     def __set__(self, obj, value):
1196         if self.name not in obj._filecache:
1197             # we add an entry for the missing value because X in __dict__
1198             # implies X in _filecache
1199             paths = [self.join(obj, path) for path in self.paths]
1200             ce = filecacheentry(paths, False)
1201             obj._filecache[self.name] = ce
1202         else:
1203             ce = obj._filecache[self.name]
1204
1205         ce.obj = value # update cached copy
1206         obj.__dict__[self.sname] = value # update copy returned by obj.x
1207
1208     def __delete__(self, obj):
1209         try:
1210             del obj.__dict__[self.sname]
1211         except KeyError:
1212             raise AttributeError(self.sname)
1213
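To make the descriptor mechanics above concrete, here is a minimal sketch of the consumer-side pattern. It mirrors how localrepo wires this up (a repofilecache subclass whose join() resolves names against the repo's vfs), but the somerepo class and parsebookmarks helper are invented for illustration:

    class repofilecache(filecache):
        """filecache for files inside .hg/"""
        def join(self, obj, fname):
            # 'obj' is the instance the decorated method lives on
            return obj.vfs.join(fname)

    class somerepo(object):
        def __init__(self, vfs):
            self.vfs = vfs
            self._filecache = {}   # required by the descriptor above

        @repofilecache('bookmarks')
        def _bookmarks(self):
            # re-evaluated only when .hg/bookmarks changes on disk
            return parsebookmarks(self.vfs)   # hypothetical parser
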
1214 def extdatasource(repo, source):
1215     """Gather a map of rev -> value dict from the specified source
1216
1217     A source spec is treated as a URL, with a special case shell: type
1218     for parsing the output from a shell command.
1219
1220     The data is parsed as a series of newline-separated records where
1221     each record is a revision specifier optionally followed by a space
1222     and a freeform string value. If the revision is known locally, it
1223     is converted to a rev, otherwise the record is skipped.
1224
1225     Note that both key and value are treated as UTF-8 and converted to
1226     the local encoding. This allows uniformity between local and
1227     remote data sources.
1228     """
1229
1230     spec = repo.ui.config("extdata", source)
1231     if not spec:
1232         raise error.Abort(_("unknown extdata source '%s'") % source)
1233
1234     data = {}
1235     src = proc = None
1236     try:
1237         if spec.startswith("shell:"):
1238             # external commands should be run relative to the repo root
1239             cmd = spec[6:]
1240             proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
1241                                     close_fds=procutil.closefds,
1242                                     stdout=subprocess.PIPE, cwd=repo.root)
1243             src = proc.stdout
1244         else:
1245             # treat as a URL or file
1246             src = url.open(repo.ui, spec)
1247         for l in src:
1248             if " " in l:
1249                 k, v = l.strip().split(" ", 1)
1250             else:
1251                 k, v = l.strip(), ""
1252
1253             k = encoding.tolocal(k)
1254             try:
1255                 data[revsingle(repo, k).rev()] = encoding.tolocal(v)
1256             except (error.LookupError, error.RepoLookupError):
1257                 pass # we ignore data for nodes that don't exist locally
1258     finally:
1259         if proc:
1260             proc.communicate()
1261         if src:
1262             src.close()
1263     if proc and proc.returncode != 0:
1264         raise error.Abort(_("extdata command '%s' failed: %s")
1265                           % (cmd, procutil.explainexit(proc.returncode)))
1266
1267     return data
1268
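As a concrete illustration of the spec and record formats described in the docstring, a sketch like the following would map revisions to freeform strings. The source name 'ticket', the script, and the record contents are made up for this example:

    # in hgrc:
    #   [extdata]
    #   ticket = shell:cat .hg/ticketmap
    #
    # where .hg/ticketmap holds newline-separated records such as:
    #   3de5eca88c00 issue-1234
    #   0.9.1 issue-9
    data = extdatasource(repo, 'ticket')
    # -> {rev_number: 'issue-1234', ...}; records naming revisions
    #    unknown to the local repo are silently skipped
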
1269 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1270     if lock is None:
1271         raise error.LockInheritanceContractViolation(
1272             'lock can only be inherited while held')
1273     if environ is None:
1274         environ = {}
1275     with lock.inherit() as locker:
1276         environ[envvar] = locker
1277         return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1278
1279 def wlocksub(repo, cmd, *args, **kwargs):
1280     """run cmd as a subprocess that allows inheriting repo's wlock
1281
1282     This can only be called while the wlock is held. This takes all the
1283     arguments that ui.system does, and returns the exit code of the
1284     subprocess."""
1285     return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
1286                     **kwargs)
1287
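A sketch of the intended call pattern, with the command string invented for illustration: the subprocess sees HG_WLOCK_LOCKER in its environment and can take over the working-directory lock instead of deadlocking against its parent.

    with repo.wlock():
        # let a helper script mutate the working directory on our behalf
        rc = wlocksub(repo, 'my-helper-script')   # hypothetical command
        if rc != 0:
            raise error.Abort(_('helper script failed'))
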
1288 class progress(object):
1289     def __init__(self, ui, topic, unit="", total=None):
1290         self.ui = ui
1291         self.pos = 0
1292         self.topic = topic
1293         self.unit = unit
1294         self.total = total
1295
1296     def __enter__(self):
1297         pass
1298
1299     def __exit__(self, exc_type, exc_value, exc_tb):
1300         self.complete()
1301
1302     def update(self, pos, item="", total=None):
1303         if total:
1304             self.total = total
1305         self.pos = pos
1306         self._print(item)
1307
1308     def increment(self, step=1, item="", total=None):
1309         self.update(self.pos + step, item, total)
1310
1311     def complete(self):
1312         self.update(None)
1313
1314     def _print(self, item):
1315         self.ui.progress(self.topic, self.pos, item, self.unit,
1316                          self.total)
1317
1318 def gdinitconfig(ui):
1319     """helper function to know if a repo should be created as general delta
1320     """
1321     # experimental config: format.generaldelta
1322     return (ui.configbool('format', 'generaldelta')
1323             or ui.configbool('format', 'usegeneraldelta'))
1324
1325 def gddeltaconfig(ui):
1326     """helper function to know if incoming delta should be optimised
1327     """
1328     # experimental config: format.generaldelta
1329     return ui.configbool('format', 'generaldelta')
1330
1331 class simplekeyvaluefile(object):
1332     """A simple file with key=value lines
1333
1334     Keys must be alphanumerics and start with a letter, values must not
1335     contain '\n' characters"""
1336     firstlinekey = '__firstline'
1337
1338     def __init__(self, vfs, path, keys=None):
1339         self.vfs = vfs
1340         self.path = path
1341
1342     def read(self, firstlinenonkeyval=False):
1343         """Read the contents of a simple key-value file
1344
1345         'firstlinenonkeyval' indicates whether the first line of file should
1346         be treated as a key-value pair or returned fully under the
1347         __firstline key."""
1348         lines = self.vfs.readlines(self.path)
1349         d = {}
1350         if firstlinenonkeyval:
1351             if not lines:
1352                 e = _("empty simplekeyvalue file")
1353                 raise error.CorruptedState(e)
1354             # we don't want to include '\n' in the __firstline
1355             d[self.firstlinekey] = lines[0][:-1]
1356             del lines[0]
1357
1358         try:
1359             # the 'if line.strip()' part prevents us from failing on empty
1360             # lines, which only contain '\n' and therefore are not skipped
1361             # by 'if line'
1362             updatedict = dict(line[:-1].split('=', 1) for line in lines
1363                               if line.strip())
1364             if self.firstlinekey in updatedict:
1365                 e = _("%r can't be used as a key")
1366                 raise error.CorruptedState(e % self.firstlinekey)
1367             d.update(updatedict)
1368         except ValueError as e:
1369             raise error.CorruptedState(str(e))
1370         return d
1371
1372     def write(self, data, firstline=None):
1373         """Write key=>value mapping to a file
1374         data is a dict. Keys must be alphanumerical and start with a letter.
1375         Values must not contain newline characters.
1376
1377         If 'firstline' is not None, it is written to file before
1378         everything else, as it is, not in a key=value form"""
1379         lines = []
1380         if firstline is not None:
1381             lines.append('%s\n' % firstline)
1382
1383         for k, v in data.items():
1384             if k == self.firstlinekey:
1385                 e = "key name '%s' is reserved" % self.firstlinekey
1386                 raise error.ProgrammingError(e)
1387             if not k[0:1].isalpha():
1388                 e = "keys must start with a letter in a key-value file"
1389                 raise error.ProgrammingError(e)
1390             if not k.isalnum():
1391                 e = "invalid key name in a simple key-value file"
1392                 raise error.ProgrammingError(e)
1393             if '\n' in v:
1394                 e = "invalid value in a simple key-value file"
1395                 raise error.ProgrammingError(e)
1396             lines.append("%s=%s\n" % (k, v))
1397         with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
1398             fp.write(''.join(lines))
1399
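A hypothetical round-trip showing both the firstline special case and the key=value records; the 'shelvedstate' file name nods to shelve, a real consumer of this class, but the keys here are only illustrative:

    f = simplekeyvaluefile(repo.vfs, 'shelvedstate')
    f.write({'version': '2', 'name': 'default'}, firstline='2')
    state = f.read(firstlinenonkeyval=True)
    # state == {'__firstline': '2', 'version': '2', 'name': 'default'}
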
1400 _reportobsoletedsource = [
1401     'debugobsolete',
1402     'pull',
1403     'push',
1404     'serve',
1405     'unbundle',
1406 ]
1407
1408 _reportnewcssource = [
1409     'pull',
1410     'unbundle',
1411 ]
1412
1413 def prefetchfiles(repo, revs, match):
1414     """Invokes the registered file prefetch functions, allowing extensions to
1415     ensure the corresponding files are available locally, before the command
1416     uses them."""
1417     if match:
1418         # The command itself will complain about files that don't exist, so
1419         # don't duplicate the message.
1420         match = matchmod.badmatch(match, lambda fn, msg: None)
1421     else:
1422         match = matchall(repo)
1423
1424     fileprefetchhooks(repo, revs, match)
1425
1426 # a list of (repo, revs, match) prefetch functions
1427 fileprefetchhooks = util.hooks()
1428
1429 # A marker that tells the evolve extension to suppress its own reporting
1430 _reportstroubledchangesets = True
1431
1432 def registersummarycallback(repo, otr, txnname=''):
1433     """register a callback to issue a summary after the transaction is closed
1434     """
1435     def txmatch(sources):
1436         return any(txnname.startswith(source) for source in sources)
1437
1438     categories = []
1439
1440     def reportsummary(func):
1441         """decorator for report callbacks."""
1442         # The repoview life cycle is shorter than the one of the actual
1443         # underlying repository. So the filtered object can die before the
1444         # weakref is used, leading to trouble. We keep a reference to the
1445         # unfiltered object and restore the filtering when retrieving the
1446         # repository through the weakref.
1447         filtername = repo.filtername
1448         reporef = weakref.ref(repo.unfiltered())
1449         def wrapped(tr):
1450             repo = reporef()
1451             if filtername:
1452                 repo = repo.filtered(filtername)
1453             func(repo, tr)
1454         newcat = '%02i-txnreport' % len(categories)
1455         otr.addpostclose(newcat, wrapped)
1456         categories.append(newcat)
1457         return wrapped
1458
1459     if txmatch(_reportobsoletedsource):
1460         @reportsummary
1461         def reportobsoleted(repo, tr):
1462             obsoleted = obsutil.getobsoleted(repo, tr)
1463             if obsoleted:
1464                 repo.ui.status(_('obsoleted %i changesets\n')
1465                                % len(obsoleted))
1466
1467     if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
1468         repo.ui.configbool('experimental', 'evolution.report-instabilities')):
1469         instabilitytypes = [
1470             ('orphan', 'orphan'),
1471             ('phase-divergent', 'phasedivergent'),
1472             ('content-divergent', 'contentdivergent'),
1473         ]
1474
1475         def getinstabilitycounts(repo):
1476             filtered = repo.changelog.filteredrevs
1477             counts = {}
1478             for instability, revset in instabilitytypes:
1479                 counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
1480                                           filtered)
1481             return counts
1482
1483         oldinstabilitycounts = getinstabilitycounts(repo)
1484         @reportsummary
1485         def reportnewinstabilities(repo, tr):
1486             newinstabilitycounts = getinstabilitycounts(repo)
1487             for instability, revset in instabilitytypes:
1488                 delta = (newinstabilitycounts[instability] -
1489                          oldinstabilitycounts[instability])
1490                 if delta > 0:
1491                     repo.ui.warn(_('%i new %s changesets\n') %
1492                                  (delta, instability))
1493
1494     if txmatch(_reportnewcssource):
1495         @reportsummary
1496         def reportnewcs(repo, tr):
1497             """Report the range of new revisions pulled/unbundled."""
1498             newrevs = tr.changes.get('revs', xrange(0, 0))
1499             if not newrevs:
1500                 return
1501
1502             # Compute the bounds of new revisions' range, excluding obsoletes.
1503             unfi = repo.unfiltered()
1504             revs = unfi.revs('%ld and not obsolete()', newrevs)
1505             if not revs:
1506                 # Got only obsoletes.
1507                 return
1508             minrev, maxrev = repo[revs.min()], repo[revs.max()]
1509
1510             if minrev == maxrev:
1511                 revrange = minrev
1512             else:
1513                 revrange = '%s:%s' % (minrev, maxrev)
1514             repo.ui.status(_('new changesets %s\n') % revrange)
1515
1516         @reportsummary
1517         def reportphasechanges(repo, tr):
1518             """Report statistics of phase changes for changesets pre-existing
1519             pull/unbundle.
1520             """
1521             newrevs = tr.changes.get('revs', xrange(0, 0))
1522             phasetracking = tr.changes.get('phases', {})
1523             if not phasetracking:
1524                 return
1525             published = [
1526                 rev for rev, (old, new) in phasetracking.iteritems()
1527                 if new == phases.public and rev not in newrevs
1528             ]
1529             if not published:
1530                 return
1531             repo.ui.status(_('%d local changesets published\n')
1532                            % len(published))
1533
1534 def nodesummaries(repo, nodes, maxnumnodes=4):
1535     if len(nodes) <= maxnumnodes or repo.ui.verbose:
1536         return ' '.join(short(h) for h in nodes)
1537     first = ' '.join(short(h) for h in nodes[:maxnumnodes])
1538     return _("%s and %d others") % (first, len(nodes) - maxnumnodes)
1539
1540 def enforcesinglehead(repo, tr, desc):
1541     """check that no named branch has multiple heads"""
1542     if desc in ('strip', 'repair'):
1543         # skip the logic during strip
1544         return
1545     visible = repo.filtered('visible')
1546     # possible improvement: we could restrict the check to affected branch
1547     for name, heads in visible.branchmap().iteritems():
1548         if len(heads) > 1:
1549             msg = _('rejecting multiple heads on branch "%s"')
1550             msg %= name
1551             hint = _('%d heads: %s')
1552             hint %= (len(heads), nodesummaries(repo, heads))
1553             raise error.Abort(msg, hint=hint)
1554
1555 def wrapconvertsink(sink):
1556     """Allow extensions to wrap the sink returned by convcmd.convertsink()
1557     before it is used, whether or not the convert extension was formally loaded.
1558     """
1559     return sink
1560
1561 def unhidehashlikerevs(repo, specs, hiddentype):
1562     """parse the user specs and unhide changesets whose hash or revision number
1563     is passed.
1564
1565     hiddentype can be: 1) 'warn': warn while unhiding changesets
1566                        2) 'nowarn': don't warn while unhiding changesets
1567
1568     returns a repo object with the required changesets unhidden
1569     """
1570     if not repo.filtername or not repo.ui.configbool('experimental',
1571                                                      'directaccess'):
1572         return repo
1573
1574     if repo.filtername not in ('visible', 'visible-hidden'):
1575         return repo
1576
1577     symbols = set()
1578     for spec in specs:
1579         try:
1580             tree = revsetlang.parse(spec)
1581         except error.ParseError: # will be reported by scmutil.revrange()
1582             continue
1583
1584         symbols.update(revsetlang.gethashlikesymbols(tree))
1585
1586     if not symbols:
1587         return repo
1588
1589     revs = _getrevsfromsymbols(repo, symbols)
1590
1591     if not revs:
1592         return repo
1593
1594     if hiddentype == 'warn':
1595         unfi = repo.unfiltered()
1596         revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
1597         repo.ui.warn(_("warning: accessing hidden changesets for write "
1598                        "operation: %s\n") % revstr)
1599
1600     # we have to use a new filtername to separate branch/tags caches until we
1601     # can disable these caches when revisions are dynamically pinned.
1602     return repo.filtered('visible-hidden', revs)
1603
1604 def _getrevsfromsymbols(repo, symbols):
1605     """parse the list of symbols and return a set of revision numbers of the
1606     hidden changesets present in symbols"""
1607     revs = set()
1608     unfi = repo.unfiltered()
1609     unficl = unfi.changelog
1610     cl = repo.changelog
1611     tiprev = len(unficl)
1612     allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
1613     for s in symbols:
1614         try:
1615             n = int(s)
1616             if n <= tiprev:
1617                 if not allowrevnums:
1618                     continue
1619                 else:
1620                     if n not in cl:
1621                         revs.add(n)
1622                     continue
1623         except ValueError:
1624             pass
1625
1626         try:
1627             s = resolvehexnodeidprefix(unfi, s)
1628         except (error.LookupError, error.WdirUnsupported):
1629             s = None
1630
1631         if s is not None:
1632             rev = unficl.rev(s)
1633             if rev not in cl:
1634                 revs.add(rev)
1635
1636     return revs
1637
1638 def bookmarkrevs(repo, mark):
1639     """
1640     Select revisions reachable by a given bookmark
1641     """
1642     return repo.revs("ancestors(bookmark(%s)) - "
1643                      "ancestors(head() and not bookmark(%s)) - "
1644                      "ancestors(bookmark() and not bookmark(%s))",
1645                      mark, mark, mark)
@@ -1,644 +1,641
1 # streamclone.py - producing and consuming streaming repository data
2 #
3 # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8 from __future__ import absolute_import
9
10 import contextlib
11 import os
12 import struct
13 import warnings
14
15 from .i18n import _
16 from . import (
17     branchmap,
18     cacheutil,
19     error,
20     phases,
21     pycompat,
22     store,
23     util,
24 )
25
26 def canperformstreamclone(pullop, bundle2=False):
27     """Whether it is possible to perform a streaming clone as part of pull.
28
29     ``bundle2`` will cause the function to consider stream clone through
30     bundle2 and only through bundle2.
31
32     Returns a tuple of (supported, requirements). ``supported`` is True if
33     streaming clone is supported and False otherwise. ``requirements`` is
34     a set of repo requirements from the remote, or ``None`` if stream clone
35     isn't supported.
36     """
37     repo = pullop.repo
38     remote = pullop.remote
39
40     bundle2supported = False
41     if pullop.canusebundle2:
42         if 'v2' in pullop.remotebundle2caps.get('stream', []):
43             bundle2supported = True
44         # else
45         # Server doesn't support bundle2 stream clone or doesn't support
46         # the versions we support. Fall back and possibly allow legacy.
47
48     # Ensures legacy code path uses available bundle2.
49     if bundle2supported and not bundle2:
50         return False, None
51     # Ensures bundle2 doesn't try to do a stream clone if it isn't supported.
52     elif bundle2 and not bundle2supported:
53         return False, None
54
55     # Streaming clone only works on empty repositories.
56     if len(repo):
57         return False, None
58
59     # Streaming clone only works if all data is being requested.
60     if pullop.heads:
61         return False, None
62
63     streamrequested = pullop.streamclonerequested
64
65     # If we don't have a preference, let the server decide for us. This
66     # likely only comes into play in LANs.
67     if streamrequested is None:
68         # The server can advertise whether to prefer streaming clone.
69         streamrequested = remote.capable('stream-preferred')
70
71     if not streamrequested:
72         return False, None
73
74     # In order for stream clone to work, the client has to support all the
75     # requirements advertised by the server.
76     #
77     # The server advertises its requirements via the "stream" and "streamreqs"
78     # capability. "stream" (a value-less capability) is advertised if and only
79     # if the only requirement is "revlogv1." Else, the "streamreqs" capability
80     # is advertised and contains a comma-delimited list of requirements.
81     requirements = set()
82     if remote.capable('stream'):
83         requirements.add('revlogv1')
84     else:
85         streamreqs = remote.capable('streamreqs')
86         # This is weird and shouldn't happen with modern servers.
87         if not streamreqs:
88             pullop.repo.ui.warn(_(
89                 'warning: stream clone requested but server has them '
90                 'disabled\n'))
91             return False, None
92
93         streamreqs = set(streamreqs.split(','))
94         # Server requires something we don't support. Bail.
95         missingreqs = streamreqs - repo.supportedformats
96         if missingreqs:
97             pullop.repo.ui.warn(_(
98                 'warning: stream clone requested but client is missing '
99                 'requirements: %s\n') % ', '.join(sorted(missingreqs)))
100             pullop.repo.ui.warn(
101                 _('(see https://www.mercurial-scm.org/wiki/MissingRequirement '
102                   'for more information)\n'))
103             return False, None
104         requirements = streamreqs
105
106     return True, requirements
107
def maybeperformlegacystreamclone(pullop):
    """Possibly perform a legacy stream clone operation.

    Legacy stream clones are performed as part of pull but before all other
    operations.

    A legacy stream clone will not be performed if a bundle2 stream clone is
    supported.
    """
    supported, requirements = canperformstreamclone(pullop)

    if not supported:
        return

    repo = pullop.repo
    remote = pullop.remote

    # Save remote branchmap. We will use it later to speed up branchcache
    # creation.
    rbranchmap = None
    if remote.capable('branchmap'):
        with remote.commandexecutor() as e:
            rbranchmap = e.callcommand('branchmap', {}).result()

    repo.ui.status(_('streaming all changes\n'))

    with remote.commandexecutor() as e:
        fp = e.callcommand('stream_out', {}).result()

    # TODO strictly speaking, this code should all be inside the context
    # manager because the context manager is supposed to ensure all wire state
    # is flushed when exiting. But the legacy peers don't do this, so it
    # doesn't matter.
    l = fp.readline()
    try:
        resp = int(l)
    except ValueError:
        raise error.ResponseError(
            _('unexpected response from remote server:'), l)
    if resp == 1:
        raise error.Abort(_('operation forbidden by server'))
    elif resp == 2:
        raise error.Abort(_('locking the remote repository failed'))
    elif resp != 0:
        raise error.Abort(_('the server sent an unknown error code'))

    l = fp.readline()
    try:
        filecount, bytecount = map(int, l.split(' ', 1))
    except (ValueError, TypeError):
        raise error.ResponseError(
            _('unexpected response from remote server:'), l)

    with repo.lock():
        consumev1(repo, fp, filecount, bytecount)

        # new requirements = old non-format requirements +
        #                    new format-related remote requirements
        # requirements from the streamed-in repository
        repo.requirements = requirements | (
            repo.requirements - repo.supportedformats)
        repo._applyopenerreqs()
        repo._writerequirements()

        if rbranchmap:
            branchmap.replacecache(repo, rbranchmap)

        repo.invalidate()

def allowservergeneration(repo):
    """Whether streaming clones are allowed from the server."""
    if not repo.ui.configbool('server', 'uncompressed', untrusted=True):
        return False

    # The way stream clone works makes it impossible to hide secret changesets.
    # So don't allow this by default.
    secret = phases.hassecret(repo)
    if secret:
        return repo.ui.configbool('server', 'uncompressedallowsecret')

    return True

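# The two knobs read above live in the [server] section of the server's
# hgrc. An illustrative configuration (values shown are examples):
#
#   [server]
#   uncompressed = true             # allow clients to request stream clones
#   uncompressedallowsecret = false # allow even when secret changesets exist
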
# This is its own function so extensions can override it.
def _walkstreamfiles(repo):
    return repo.store.walk()

def generatev1(repo):
    """Emit content for version 1 of a streaming clone.

    This returns a 3-tuple of (file count, byte size, data iterator).

    The data iterator consists of N entries for each file being transferred.
    Each file entry starts as a line with the file name and integer size
    delimited by a null byte.

    The raw file data follows. Following the raw file data is the next file
    entry, or EOF.

    When used on the wire protocol, an additional line indicating protocol
    success will be prepended to the stream. This function is not responsible
    for adding it.

    This function will obtain a repository lock to ensure a consistent view of
    the store is captured. It therefore may raise LockError.
    """
    entries = []
    total_bytes = 0
    # Get consistent snapshot of repo, lock during scan.
    with repo.lock():
        repo.ui.debug('scanning\n')
        for name, ename, size in _walkstreamfiles(repo):
            if size:
                entries.append((name, size))
                total_bytes += size

    repo.ui.debug('%d files, %d bytes to transfer\n' %
                  (len(entries), total_bytes))

    svfs = repo.svfs
    debugflag = repo.ui.debugflag

    def emitrevlogdata():
        for name, size in entries:
            if debugflag:
                repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
            # partially encode name over the wire for backwards compat
            yield '%s\0%d\n' % (store.encodedir(name), size)
            # auditing at this stage is both pointless (paths are already
            # trusted by the local repo) and expensive
            with svfs(name, 'rb', auditpath=False) as fp:
                if size <= 65536:
                    yield fp.read(size)
                else:
                    for chunk in util.filechunkiter(fp, limit=size):
                        yield chunk

    return len(entries), total_bytes, emitrevlogdata()

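# A minimal sketch (not part of this module) of reading back the framing
# emitted above. Each entry is '<encoded name>\0<size>\n' followed by
# exactly <size> raw bytes, which is also what consumev1() below parses.
#
#   def _readv1entries(fp, filecount):
#       for _i in range(filecount):
#           header = fp.readline()
#           name, size = header.split('\0', 1)
#           size = int(size)
#           yield name, fp.read(size)
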
def generatev1wireproto(repo):
    """Emit content for version 1 of streaming clone suitable for the wire.

    This is the data output from ``generatev1()`` with 2 header lines. The
    first line indicates overall success. The 2nd contains the file count and
    byte size of payload.

    The success line contains "0" for success, "1" for stream generation not
    allowed, and "2" for error locking the repository (possibly indicating
    a permissions error for the server process).
    """
    if not allowservergeneration(repo):
        yield '1\n'
        return

    try:
        filecount, bytecount, it = generatev1(repo)
    except error.LockError:
        yield '2\n'
        return

    # Indicates successful response.
    yield '0\n'
    yield '%d %d\n' % (filecount, bytecount)
    for chunk in it:
        yield chunk

def generatebundlev1(repo, compression='UN'):
    """Emit content for version 1 of a stream clone bundle.

    The first 4 bytes of the output ("HGS1") denote this as stream clone
    bundle version 1.

    The next 2 bytes indicate the compression type. Only "UN" is currently
    supported.

    The next 16 bytes are two 64-bit big endian unsigned integers indicating
    file count and byte count, respectively.

    The next 2 bytes is a 16-bit big endian unsigned short declaring the length
    of the requirements string, including a trailing \0. The following N bytes
    are the requirements string, which is ASCII containing a comma-delimited
    list of repo requirements that are needed to support the data.

    The remaining content is the output of ``generatev1()`` (which may be
    compressed in the future).

    Returns a tuple of (requirements, data generator).
    """
    if compression != 'UN':
        raise ValueError('we do not support the compression argument yet')

    requirements = repo.requirements & repo.supportedformats
    requires = ','.join(sorted(requirements))

    def gen():
        yield 'HGS1'
        yield compression

        filecount, bytecount, it = generatev1(repo)
        repo.ui.status(_('writing %d bytes for %d files\n') %
                       (bytecount, filecount))

        yield struct.pack('>QQ', filecount, bytecount)
        yield struct.pack('>H', len(requires) + 1)
        yield requires + '\0'

        # This is where we'll add compression in the future.
        assert compression == 'UN'

        progress = repo.ui.makeprogress(_('bundle'), total=bytecount,
                                        unit=_('bytes'))
        progress.update(0)

        for chunk in it:
            progress.increment(step=len(chunk))
            yield chunk

        progress.complete()

    return requirements, gen()

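# A worked example (illustrative values only) of the HGS1 header layout
# documented above, for an uncompressed bundle of 3 files / 12345 bytes
# with a single "revlogv1" requirement:
#
#   requires = 'revlogv1'
#   header = ('HGS1'                                  # 4-byte magic
#             + 'UN'                                  # 2-byte compression type
#             + struct.pack('>QQ', 3, 12345)          # file count, byte count
#             + struct.pack('>H', len(requires) + 1)  # length incl. trailing \0
#             + requires + '\0')                      # requirements string
#
# readbundle1header() below performs the inverse of this packing.
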
def consumev1(repo, fp, filecount, bytecount):
    """Apply the contents from version 1 of a streaming clone file handle.

    This takes the output from "stream_out" and applies it to the specified
    repository.

    Like "stream_out," the status line added by the wire protocol is not
    handled by this function.
    """
    with repo.lock():
        repo.ui.status(_('%d files to transfer, %s of data\n') %
                       (filecount, util.bytecount(bytecount)))
        progress = repo.ui.makeprogress(_('clone'), total=bytecount,
                                        unit=_('bytes'))
        progress.update(0)
        start = util.timer()

        # TODO: get rid of (potential) inconsistency
        #
        # If transaction is started and any @filecache property is
        # changed at this point, it causes inconsistency between
        # in-memory cached property and streamclone-ed file on the
        # disk. Nested transaction prevents transaction scope "clone"
        # below from writing in-memory changes out at the end of it,
        # even though in-memory changes are discarded at the end of it
        # regardless of transaction nesting.
        #
        # But transaction nesting can't be simply prohibited, because
        # nesting occurs also in ordinary case (e.g. enabling
        # clonebundles).

        with repo.transaction('clone'):
            with repo.svfs.backgroundclosing(repo.ui, expectedcount=filecount):
                for i in xrange(filecount):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if repo.ui.debugflag:
                        repo.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    path = store.decodedir(name)
                    with repo.svfs(path, 'w', backgroundclose=True) as ofp:
                        for chunk in util.filechunkiter(fp, limit=size):
                            progress.increment(step=len(chunk))
                            ofp.write(chunk)

        # force @filecache properties to be reloaded from
        # streamclone-ed file at next access
        repo.invalidate(clearfilecache=True)

        elapsed = util.timer() - start
        if elapsed <= 0:
            elapsed = 0.001
        progress.complete()
        repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(bytecount), elapsed,
                        util.bytecount(bytecount / elapsed)))

def readbundle1header(fp):
    compression = fp.read(2)
    if compression != 'UN':
        raise error.Abort(_('only uncompressed stream clone bundles are '
                            'supported; got %s') % compression)

    filecount, bytecount = struct.unpack('>QQ', fp.read(16))
    requireslen = struct.unpack('>H', fp.read(2))[0]
    requires = fp.read(requireslen)

    if not requires.endswith('\0'):
        raise error.Abort(_('malformed stream clone bundle: '
                            'requirements not properly encoded'))

    requirements = set(requires.rstrip('\0').split(','))

    return filecount, bytecount, requirements

def applybundlev1(repo, fp):
    """Apply the content from a stream clone bundle version 1.

    We assume the 4 byte header has been read and validated and the file handle
    is at the 2 byte compression identifier.
    """
    if len(repo):
        raise error.Abort(_('cannot apply stream clone bundle on non-empty '
                            'repo'))

    filecount, bytecount, requirements = readbundle1header(fp)
    missingreqs = requirements - repo.supportedformats
    if missingreqs:
        raise error.Abort(_('unable to apply stream clone: '
                            'unsupported format: %s') %
                          ', '.join(sorted(missingreqs)))

    consumev1(repo, fp, filecount, bytecount)

class streamcloneapplier(object):
    """Class to manage applying streaming clone bundles.

    We need to wrap ``applybundlev1()`` in a dedicated type to enable bundle
    readers to perform bundle type-specific functionality.
    """
    def __init__(self, fh):
        self._fh = fh

    def apply(self, repo):
        return applybundlev1(repo, self._fh)

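# Hypothetical usage sketch: after a caller has read and validated the
# 4-byte 'HGS1' magic, the remaining handle can be wrapped and applied to
# an empty repository:
#
#   applier = streamcloneapplier(fh)
#   applier.apply(repo)
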
# type of file to stream
_fileappend = 0 # append only file
_filefull = 1 # full snapshot file

# Source of the file
_srcstore = 's' # store (svfs)
_srccache = 'c' # cache (cache)

# This is its own function so extensions can override it.
def _walkstreamfullstorefiles(repo):
    """list snapshot files from the store"""
    fnames = []
    if not repo.publishing():
        fnames.append('phaseroots')
    return fnames

def _filterfull(entry, copy, vfsmap):
    """actually copy the snapshot files"""
    src, name, ftype, data = entry
    if ftype != _filefull:
        return entry
    return (src, name, ftype, copy(vfsmap[src].join(name)))

@contextlib.contextmanager
def maketempcopies():
    """return a function to temporarily copy a file"""
    files = []
    try:
        def copy(src):
            fd, dst = pycompat.mkstemp()
            os.close(fd)
            files.append(dst)
            util.copyfiles(src, dst, hardlink=True)
            return dst
        yield copy
    finally:
        for tmp in files:
            util.tryunlink(tmp)

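# Usage sketch: inside the context, `copy` hardlinks a file into a temporary
# location that survives until the context exits, so full-snapshot files can
# be captured under the repo lock and streamed after the lock is released
# (see _emit2() below). The path here is hypothetical:
#
#   with maketempcopies() as copy:
#       tmppath = copy('/repo/.hg/store/phaseroots')
#       ...  # stream tmppath at leisure; it is unlinked on exit
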
def _makemap(repo):
    """make a (src -> vfs) map for the repo"""
    vfsmap = {
        _srcstore: repo.svfs,
        _srccache: repo.cachevfs,
    }
    # we keep repo.vfs out of the map on purpose: there are too many dangers
    # there (eg: .hg/hgrc)
    assert repo.vfs not in vfsmap.values()

    return vfsmap

def _emit2(repo, entries, totalfilesize):
    """actually emit the stream bundle"""
    vfsmap = _makemap(repo)
    progress = repo.ui.makeprogress(_('bundle'), total=totalfilesize,
                                    unit=_('bytes'))
    progress.update(0)
    with maketempcopies() as copy, progress:
        # copying is delayed until we are inside the context manager
        entries = [_filterfull(e, copy, vfsmap) for e in entries]
        yield None # this releases the lock on the repository
        seen = 0

        for src, name, ftype, data in entries:
            vfs = vfsmap[src]
            yield src
            yield util.uvarintencode(len(name))
            if ftype == _fileappend:
                fp = vfs(name)
                size = data
            elif ftype == _filefull:
                fp = open(data, 'rb')
                size = util.fstat(fp).st_size
            try:
                yield util.uvarintencode(size)
                yield name
                if size <= 65536:
                    chunks = (fp.read(size),)
                else:
                    chunks = util.filechunkiter(fp, limit=size)
                for chunk in chunks:
                    seen += len(chunk)
                    progress.update(seen)
                    yield chunk
            finally:
                fp.close()

def generatev2(repo):
    """Emit content for version 2 of a streaming clone.

    The data stream consists of the following entries:
    1) A char representing the file destination (eg: store or cache)
    2) A varint containing the length of the filename
    3) A varint containing the length of file data
    4) N bytes containing the filename (the internal, store-agnostic form)
    5) N bytes containing the file data

    Returns a 3-tuple of (file count, file size, data iterator).
    """

    with repo.lock():

        entries = []
        totalfilesize = 0

        repo.ui.debug('scanning\n')
        for name, ename, size in _walkstreamfiles(repo):
            if size:
                entries.append((_srcstore, name, _fileappend, size))
                totalfilesize += size
        for name in _walkstreamfullstorefiles(repo):
            if repo.svfs.exists(name):
                totalfilesize += repo.svfs.lstat(name).st_size
                entries.append((_srcstore, name, _filefull, None))
        for name in cacheutil.cachetocopy(repo):
            if repo.cachevfs.exists(name):
                totalfilesize += repo.cachevfs.lstat(name).st_size
                entries.append((_srccache, name, _filefull, None))

        chunks = _emit2(repo, entries, totalfilesize)
        first = next(chunks)
        assert first is None

    return len(entries), totalfilesize, chunks

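# A minimal sketch (not part of this module) of the unsigned-varint coding
# used for the two length fields above: each byte carries 7 bits, least
# significant group first, with the high bit set on every byte except the
# last. This mirrors what util.uvarintencode()/util.uvarintdecodestream() do.
#
#   def _uvarintdecode(fp):
#       result = 0
#       shift = 0
#       while True:
#           byte = ord(fp.read(1))
#           result |= (byte & 0x7f) << shift
#           if not (byte & 0x80):
#               return result
#           shift += 7
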
@contextlib.contextmanager
def nested(*ctxs):
    with warnings.catch_warnings():
        # For some reason, Python decided 'nested' was deprecated without
        # replacement. They officially advise filtering the deprecation
        # warning for people who actually need the feature.
        warnings.filterwarnings("ignore", category=DeprecationWarning)
        with contextlib.nested(*ctxs):
            yield

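# Note: contextlib.nested() only exists on Python 2. A Python 3 equivalent
# of the helper above could be built on ExitStack (a sketch, not what this
# module does):
#
#   @contextlib.contextmanager
#   def nested(*ctxs):
#       with contextlib.ExitStack() as stack:
#           for ctx in ctxs:
#               stack.enter_context(ctx)
#           yield
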
def consumev2(repo, fp, filecount, filesize):
    """Apply the contents from a version 2 streaming clone.

    Data is read from an object that only needs to provide a ``read(size)``
    method.
    """
    with repo.lock():
        repo.ui.status(_('%d files to transfer, %s of data\n') %
                       (filecount, util.bytecount(filesize)))

        start = util.timer()
        progress = repo.ui.makeprogress(_('clone'), total=filesize,
                                        unit=_('bytes'))
        progress.update(0)

        vfsmap = _makemap(repo)

        with repo.transaction('clone'):
            ctxs = (vfs.backgroundclosing(repo.ui)
                    for vfs in vfsmap.values())
            with nested(*ctxs):
                for i in range(filecount):
                    src = util.readexactly(fp, 1)
                    vfs = vfsmap[src]
                    namelen = util.uvarintdecodestream(fp)
                    datalen = util.uvarintdecodestream(fp)

                    name = util.readexactly(fp, namelen)

                    if repo.ui.debugflag:
                        repo.ui.debug('adding [%s] %s (%s)\n' %
                                      (src, name, util.bytecount(datalen)))

                    with vfs(name, 'w') as ofp:
                        for chunk in util.filechunkiter(fp, limit=datalen):
                            progress.increment(step=len(chunk))
                            ofp.write(chunk)

        # force @filecache properties to be reloaded from
        # streamclone-ed file at next access
        repo.invalidate(clearfilecache=True)

        elapsed = util.timer() - start
        if elapsed <= 0:
            elapsed = 0.001
        repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(progress.pos), elapsed,
                        util.bytecount(progress.pos / elapsed)))
        progress.complete()

def applybundlev2(repo, fp, filecount, filesize, requirements):
    missingreqs = [r for r in requirements if r not in repo.supported]
    if missingreqs:
        raise error.Abort(_('unable to apply stream clone: '
                            'unsupported format: %s') %
                          ', '.join(sorted(missingreqs)))

    consumev2(repo, fp, filecount, filesize)

    # new requirements = old non-format requirements +
    #                    new format-related remote requirements
    # requirements from the streamed-in repository
    repo.requirements = set(requirements) | (
        repo.requirements - repo.supportedformats)
    repo._applyopenerreqs()
    repo._writerequirements()