dirstate: add a function to update tracking status while "moving" parents...
marmoute
r48392:0f5c203e default
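The new `update_file_reference` method may only be called inside a `dirstate.parentchange()` context; the new `requires_parents_change` decorator enforces that. As a rough sketch of the intended calling pattern, a history-rewriting operation might look like the following (hypothetical names: `repo`, `new_parent_node`, `touched_files`, and `p1_tracked_files` are stand-ins for values the caller would already have, not part of this change):

    with repo.dirstate.parentchange():
        repo.dirstate.setparents(new_parent_node)
        for f in sorted(touched_files):
            # p1_tracked is True when the file exists in the new
            # parent's manifest (an assumption for this sketch)
            repo.dirstate.update_file_reference(
                f,
                p1_tracked=f in p1_tracked_files,
            )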
@@ -1,1448 +1,1497
# dirstate.py - working directory tracking for mercurial
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import contextlib
import errno
import os
import stat

from .i18n import _
from .pycompat import delattr

from hgdemandimport import tracing

from . import (
    dirstatemap,
    encoding,
    error,
    match as matchmod,
    pathutil,
    policy,
    pycompat,
    scmutil,
    sparse,
    util,
)

from .interfaces import (
    dirstate as intdirstate,
    util as interfaceutil,
)

parsers = policy.importmod('parsers')
rustmod = policy.importrust('dirstate')

SUPPORTS_DIRSTATE_V2 = rustmod is not None

propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = dirstatemap.rangemask

DirstateItem = parsers.DirstateItem


class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        return obj._opener.join(fname)


class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        return obj._join(fname)


def _getfsnow(vfs):
    '''Get "now" timestamp on filesystem'''
    tmpfd, tmpname = vfs.mkstemp()
    try:
        return os.fstat(tmpfd)[stat.ST_MTIME]
    finally:
        os.close(tmpfd)
        vfs.unlink(tmpname)

+def requires_parents_change(func):
+    def wrap(self, *args, **kwargs):
+        if not self.pendingparentchange():
+            msg = 'calling `%s` outside of a parentchange context'
+            msg %= func.__name__
+            raise error.ProgrammingError(msg)
+        return func(self, *args, **kwargs)
+
+    return wrap
+
+
85
75 @interfaceutil.implementer(intdirstate.idirstate)
86 @interfaceutil.implementer(intdirstate.idirstate)
76 class dirstate(object):
87 class dirstate(object):
77 def __init__(
88 def __init__(
78 self,
89 self,
79 opener,
90 opener,
80 ui,
91 ui,
81 root,
92 root,
82 validate,
93 validate,
83 sparsematchfn,
94 sparsematchfn,
84 nodeconstants,
95 nodeconstants,
85 use_dirstate_v2,
96 use_dirstate_v2,
86 ):
97 ):
87 """Create a new dirstate object.
98 """Create a new dirstate object.
88
99
89 opener is an open()-like callable that can be used to open the
100 opener is an open()-like callable that can be used to open the
90 dirstate file; root is the root of the directory tracked by
101 dirstate file; root is the root of the directory tracked by
91 the dirstate.
102 the dirstate.
92 """
103 """
93 self._use_dirstate_v2 = use_dirstate_v2
104 self._use_dirstate_v2 = use_dirstate_v2
94 self._nodeconstants = nodeconstants
105 self._nodeconstants = nodeconstants
95 self._opener = opener
106 self._opener = opener
96 self._validate = validate
107 self._validate = validate
97 self._root = root
108 self._root = root
98 self._sparsematchfn = sparsematchfn
109 self._sparsematchfn = sparsematchfn
99 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
110 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
100 # UNC path pointing to root share (issue4557)
111 # UNC path pointing to root share (issue4557)
101 self._rootdir = pathutil.normasprefix(root)
112 self._rootdir = pathutil.normasprefix(root)
102 self._dirty = False
113 self._dirty = False
103 self._lastnormaltime = 0
114 self._lastnormaltime = 0
104 self._ui = ui
115 self._ui = ui
105 self._filecache = {}
116 self._filecache = {}
106 self._parentwriters = 0
117 self._parentwriters = 0
107 self._filename = b'dirstate'
118 self._filename = b'dirstate'
108 self._pendingfilename = b'%s.pending' % self._filename
119 self._pendingfilename = b'%s.pending' % self._filename
109 self._plchangecallbacks = {}
120 self._plchangecallbacks = {}
110 self._origpl = None
121 self._origpl = None
111 self._updatedfiles = set()
122 self._updatedfiles = set()
112 self._mapcls = dirstatemap.dirstatemap
123 self._mapcls = dirstatemap.dirstatemap
113 # Access and cache cwd early, so we don't access it for the first time
124 # Access and cache cwd early, so we don't access it for the first time
114 # after a working-copy update caused it to not exist (accessing it then
125 # after a working-copy update caused it to not exist (accessing it then
115 # raises an exception).
126 # raises an exception).
116 self._cwd
127 self._cwd
117
128
118 def prefetch_parents(self):
129 def prefetch_parents(self):
119 """make sure the parents are loaded
130 """make sure the parents are loaded
120
131
121 Used to avoid a race condition.
132 Used to avoid a race condition.
122 """
133 """
123 self._pl
134 self._pl
124
135
125 @contextlib.contextmanager
136 @contextlib.contextmanager
126 def parentchange(self):
137 def parentchange(self):
127 """Context manager for handling dirstate parents.
138 """Context manager for handling dirstate parents.
128
139
129 If an exception occurs in the scope of the context manager,
140 If an exception occurs in the scope of the context manager,
130 the incoherent dirstate won't be written when wlock is
141 the incoherent dirstate won't be written when wlock is
131 released.
142 released.
132 """
143 """
133 self._parentwriters += 1
144 self._parentwriters += 1
134 yield
145 yield
135 # Typically we want the "undo" step of a context manager in a
146 # Typically we want the "undo" step of a context manager in a
136 # finally block so it happens even when an exception
147 # finally block so it happens even when an exception
137 # occurs. In this case, however, we only want to decrement
148 # occurs. In this case, however, we only want to decrement
138 # parentwriters if the code in the with statement exits
149 # parentwriters if the code in the with statement exits
139 # normally, so we don't have a try/finally here on purpose.
150 # normally, so we don't have a try/finally here on purpose.
140 self._parentwriters -= 1
151 self._parentwriters -= 1
141
152
142 def pendingparentchange(self):
153 def pendingparentchange(self):
143 """Returns true if the dirstate is in the middle of a set of changes
154 """Returns true if the dirstate is in the middle of a set of changes
144 that modify the dirstate parent.
155 that modify the dirstate parent.
145 """
156 """
146 return self._parentwriters > 0
157 return self._parentwriters > 0
147
158
148 @propertycache
159 @propertycache
149 def _map(self):
160 def _map(self):
150 """Return the dirstate contents (see documentation for dirstatemap)."""
161 """Return the dirstate contents (see documentation for dirstatemap)."""
151 self._map = self._mapcls(
162 self._map = self._mapcls(
152 self._ui,
163 self._ui,
153 self._opener,
164 self._opener,
154 self._root,
165 self._root,
155 self._nodeconstants,
166 self._nodeconstants,
156 self._use_dirstate_v2,
167 self._use_dirstate_v2,
157 )
168 )
158 return self._map
169 return self._map
159
170
160 @property
171 @property
161 def _sparsematcher(self):
172 def _sparsematcher(self):
162 """The matcher for the sparse checkout.
173 """The matcher for the sparse checkout.
163
174
164 The working directory may not include every file from a manifest. The
175 The working directory may not include every file from a manifest. The
165 matcher obtained by this property will match a path if it is to be
176 matcher obtained by this property will match a path if it is to be
166 included in the working directory.
177 included in the working directory.
167 """
178 """
168 # TODO there is potential to cache this property. For now, the matcher
179 # TODO there is potential to cache this property. For now, the matcher
169 # is resolved on every access. (But the called function does use a
180 # is resolved on every access. (But the called function does use a
170 # cache to keep the lookup fast.)
181 # cache to keep the lookup fast.)
171 return self._sparsematchfn()
182 return self._sparsematchfn()
172
183
173 @repocache(b'branch')
184 @repocache(b'branch')
174 def _branch(self):
185 def _branch(self):
175 try:
186 try:
176 return self._opener.read(b"branch").strip() or b"default"
187 return self._opener.read(b"branch").strip() or b"default"
177 except IOError as inst:
188 except IOError as inst:
178 if inst.errno != errno.ENOENT:
189 if inst.errno != errno.ENOENT:
179 raise
190 raise
180 return b"default"
191 return b"default"
181
192
182 @property
193 @property
183 def _pl(self):
194 def _pl(self):
184 return self._map.parents()
195 return self._map.parents()
185
196
186 def hasdir(self, d):
197 def hasdir(self, d):
187 return self._map.hastrackeddir(d)
198 return self._map.hastrackeddir(d)
188
199
189 @rootcache(b'.hgignore')
200 @rootcache(b'.hgignore')
190 def _ignore(self):
201 def _ignore(self):
191 files = self._ignorefiles()
202 files = self._ignorefiles()
192 if not files:
203 if not files:
193 return matchmod.never()
204 return matchmod.never()
194
205
195 pats = [b'include:%s' % f for f in files]
206 pats = [b'include:%s' % f for f in files]
196 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
207 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
197
208
198 @propertycache
209 @propertycache
199 def _slash(self):
210 def _slash(self):
200 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
211 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
201
212
202 @propertycache
213 @propertycache
203 def _checklink(self):
214 def _checklink(self):
204 return util.checklink(self._root)
215 return util.checklink(self._root)
205
216
206 @propertycache
217 @propertycache
207 def _checkexec(self):
218 def _checkexec(self):
208 return bool(util.checkexec(self._root))
219 return bool(util.checkexec(self._root))
209
220
210 @propertycache
221 @propertycache
211 def _checkcase(self):
222 def _checkcase(self):
212 return not util.fscasesensitive(self._join(b'.hg'))
223 return not util.fscasesensitive(self._join(b'.hg'))
213
224
214 def _join(self, f):
225 def _join(self, f):
215 # much faster than os.path.join()
226 # much faster than os.path.join()
216 # it's safe because f is always a relative path
227 # it's safe because f is always a relative path
217 return self._rootdir + f
228 return self._rootdir + f
218
229
219 def flagfunc(self, buildfallback):
230 def flagfunc(self, buildfallback):
220 if self._checklink and self._checkexec:
231 if self._checklink and self._checkexec:
221
232
222 def f(x):
233 def f(x):
223 try:
234 try:
224 st = os.lstat(self._join(x))
235 st = os.lstat(self._join(x))
225 if util.statislink(st):
236 if util.statislink(st):
226 return b'l'
237 return b'l'
227 if util.statisexec(st):
238 if util.statisexec(st):
228 return b'x'
239 return b'x'
229 except OSError:
240 except OSError:
230 pass
241 pass
231 return b''
242 return b''
232
243
233 return f
244 return f
234
245
235 fallback = buildfallback()
246 fallback = buildfallback()
236 if self._checklink:
247 if self._checklink:
237
248
238 def f(x):
249 def f(x):
239 if os.path.islink(self._join(x)):
250 if os.path.islink(self._join(x)):
240 return b'l'
251 return b'l'
241 if b'x' in fallback(x):
252 if b'x' in fallback(x):
242 return b'x'
253 return b'x'
243 return b''
254 return b''
244
255
245 return f
256 return f
246 if self._checkexec:
257 if self._checkexec:
247
258
248 def f(x):
259 def f(x):
249 if b'l' in fallback(x):
260 if b'l' in fallback(x):
250 return b'l'
261 return b'l'
251 if util.isexec(self._join(x)):
262 if util.isexec(self._join(x)):
252 return b'x'
263 return b'x'
253 return b''
264 return b''
254
265
255 return f
266 return f
256 else:
267 else:
257 return fallback
268 return fallback
258
269
259 @propertycache
270 @propertycache
260 def _cwd(self):
271 def _cwd(self):
261 # internal config: ui.forcecwd
272 # internal config: ui.forcecwd
262 forcecwd = self._ui.config(b'ui', b'forcecwd')
273 forcecwd = self._ui.config(b'ui', b'forcecwd')
263 if forcecwd:
274 if forcecwd:
264 return forcecwd
275 return forcecwd
265 return encoding.getcwd()
276 return encoding.getcwd()
266
277
267 def getcwd(self):
278 def getcwd(self):
268 """Return the path from which a canonical path is calculated.
279 """Return the path from which a canonical path is calculated.
269
280
270 This path should be used to resolve file patterns or to convert
281 This path should be used to resolve file patterns or to convert
271 canonical paths back to file paths for display. It shouldn't be
282 canonical paths back to file paths for display. It shouldn't be
272 used to get real file paths. Use vfs functions instead.
283 used to get real file paths. Use vfs functions instead.
273 """
284 """
274 cwd = self._cwd
285 cwd = self._cwd
275 if cwd == self._root:
286 if cwd == self._root:
276 return b''
287 return b''
277 # self._root ends with a path separator if self._root is '/' or 'C:\'
288 # self._root ends with a path separator if self._root is '/' or 'C:\'
278 rootsep = self._root
289 rootsep = self._root
279 if not util.endswithsep(rootsep):
290 if not util.endswithsep(rootsep):
280 rootsep += pycompat.ossep
291 rootsep += pycompat.ossep
281 if cwd.startswith(rootsep):
292 if cwd.startswith(rootsep):
282 return cwd[len(rootsep) :]
293 return cwd[len(rootsep) :]
283 else:
294 else:
284 # we're outside the repo. return an absolute path.
295 # we're outside the repo. return an absolute path.
285 return cwd
296 return cwd
286
297
287 def pathto(self, f, cwd=None):
298 def pathto(self, f, cwd=None):
288 if cwd is None:
299 if cwd is None:
289 cwd = self.getcwd()
300 cwd = self.getcwd()
290 path = util.pathto(self._root, cwd, f)
301 path = util.pathto(self._root, cwd, f)
291 if self._slash:
302 if self._slash:
292 return util.pconvert(path)
303 return util.pconvert(path)
293 return path
304 return path
294
305
295 def __getitem__(self, key):
306 def __getitem__(self, key):
296 """Return the current state of key (a filename) in the dirstate.
307 """Return the current state of key (a filename) in the dirstate.
297
308
298 States are:
309 States are:
299 n normal
310 n normal
300 m needs merging
311 m needs merging
301 r marked for removal
312 r marked for removal
302 a marked for addition
313 a marked for addition
303 ? not tracked
314 ? not tracked
304
315
305 XXX The "state" is a bit obscure to be in the "public" API. we should
316 XXX The "state" is a bit obscure to be in the "public" API. we should
306 consider migrating all user of this to going through the dirstate entry
317 consider migrating all user of this to going through the dirstate entry
307 instead.
318 instead.
308 """
319 """
309 entry = self._map.get(key)
320 entry = self._map.get(key)
310 if entry is not None:
321 if entry is not None:
311 return entry.state
322 return entry.state
312 return b'?'
323 return b'?'
313
324
314 def __contains__(self, key):
325 def __contains__(self, key):
315 return key in self._map
326 return key in self._map
316
327
317 def __iter__(self):
328 def __iter__(self):
318 return iter(sorted(self._map))
329 return iter(sorted(self._map))
319
330
320 def items(self):
331 def items(self):
321 return pycompat.iteritems(self._map)
332 return pycompat.iteritems(self._map)
322
333
323 iteritems = items
334 iteritems = items
324
335
325 def directories(self):
336 def directories(self):
326 return self._map.directories()
337 return self._map.directories()
327
338
328 def parents(self):
339 def parents(self):
329 return [self._validate(p) for p in self._pl]
340 return [self._validate(p) for p in self._pl]
330
341
331 def p1(self):
342 def p1(self):
332 return self._validate(self._pl[0])
343 return self._validate(self._pl[0])
333
344
334 def p2(self):
345 def p2(self):
335 return self._validate(self._pl[1])
346 return self._validate(self._pl[1])
336
347
337 @property
348 @property
338 def in_merge(self):
349 def in_merge(self):
339 """True if a merge is in progress"""
350 """True if a merge is in progress"""
340 return self._pl[1] != self._nodeconstants.nullid
351 return self._pl[1] != self._nodeconstants.nullid
341
352
342 def branch(self):
353 def branch(self):
343 return encoding.tolocal(self._branch)
354 return encoding.tolocal(self._branch)
344
355
345 def setparents(self, p1, p2=None):
356 def setparents(self, p1, p2=None):
346 """Set dirstate parents to p1 and p2.
357 """Set dirstate parents to p1 and p2.
347
358
348 When moving from two parents to one, "merged" entries a
359 When moving from two parents to one, "merged" entries a
349 adjusted to normal and previous copy records discarded and
360 adjusted to normal and previous copy records discarded and
350 returned by the call.
361 returned by the call.
351
362
352 See localrepo.setparents()
363 See localrepo.setparents()
353 """
364 """
354 if p2 is None:
365 if p2 is None:
355 p2 = self._nodeconstants.nullid
366 p2 = self._nodeconstants.nullid
356 if self._parentwriters == 0:
367 if self._parentwriters == 0:
357 raise ValueError(
368 raise ValueError(
358 b"cannot set dirstate parent outside of "
369 b"cannot set dirstate parent outside of "
359 b"dirstate.parentchange context manager"
370 b"dirstate.parentchange context manager"
360 )
371 )
361
372
362 self._dirty = True
373 self._dirty = True
363 oldp2 = self._pl[1]
374 oldp2 = self._pl[1]
364 if self._origpl is None:
375 if self._origpl is None:
365 self._origpl = self._pl
376 self._origpl = self._pl
366 self._map.setparents(p1, p2)
377 self._map.setparents(p1, p2)
367 copies = {}
378 copies = {}
368 if (
379 if (
369 oldp2 != self._nodeconstants.nullid
380 oldp2 != self._nodeconstants.nullid
370 and p2 == self._nodeconstants.nullid
381 and p2 == self._nodeconstants.nullid
371 ):
382 ):
372 candidatefiles = self._map.non_normal_or_other_parent_paths()
383 candidatefiles = self._map.non_normal_or_other_parent_paths()
373
384
374 for f in candidatefiles:
385 for f in candidatefiles:
375 s = self._map.get(f)
386 s = self._map.get(f)
376 if s is None:
387 if s is None:
377 continue
388 continue
378
389
379 # Discard "merged" markers when moving away from a merge state
390 # Discard "merged" markers when moving away from a merge state
380 if s.merged:
391 if s.merged:
381 source = self._map.copymap.get(f)
392 source = self._map.copymap.get(f)
382 if source:
393 if source:
383 copies[f] = source
394 copies[f] = source
384 self.normallookup(f)
395 self.normallookup(f)
385 # Also fix up otherparent markers
396 # Also fix up otherparent markers
386 elif s.from_p2:
397 elif s.from_p2:
387 source = self._map.copymap.get(f)
398 source = self._map.copymap.get(f)
388 if source:
399 if source:
389 copies[f] = source
400 copies[f] = source
390 self._add(f)
401 self._add(f)
391 return copies
402 return copies
392
403
393 def setbranch(self, branch):
404 def setbranch(self, branch):
394 self.__class__._branch.set(self, encoding.fromlocal(branch))
405 self.__class__._branch.set(self, encoding.fromlocal(branch))
395 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
406 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
396 try:
407 try:
397 f.write(self._branch + b'\n')
408 f.write(self._branch + b'\n')
398 f.close()
409 f.close()
399
410
400 # make sure filecache has the correct stat info for _branch after
411 # make sure filecache has the correct stat info for _branch after
401 # replacing the underlying file
412 # replacing the underlying file
402 ce = self._filecache[b'_branch']
413 ce = self._filecache[b'_branch']
403 if ce:
414 if ce:
404 ce.refresh()
415 ce.refresh()
405 except: # re-raises
416 except: # re-raises
406 f.discard()
417 f.discard()
407 raise
418 raise
408
419
409 def invalidate(self):
420 def invalidate(self):
410 """Causes the next access to reread the dirstate.
421 """Causes the next access to reread the dirstate.
411
422
412 This is different from localrepo.invalidatedirstate() because it always
423 This is different from localrepo.invalidatedirstate() because it always
413 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
424 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
414 check whether the dirstate has changed before rereading it."""
425 check whether the dirstate has changed before rereading it."""
415
426
416 for a in ("_map", "_branch", "_ignore"):
427 for a in ("_map", "_branch", "_ignore"):
417 if a in self.__dict__:
428 if a in self.__dict__:
418 delattr(self, a)
429 delattr(self, a)
419 self._lastnormaltime = 0
430 self._lastnormaltime = 0
420 self._dirty = False
431 self._dirty = False
421 self._updatedfiles.clear()
432 self._updatedfiles.clear()
422 self._parentwriters = 0
433 self._parentwriters = 0
423 self._origpl = None
434 self._origpl = None
424
435
425 def copy(self, source, dest):
436 def copy(self, source, dest):
426 """Mark dest as a copy of source. Unmark dest if source is None."""
437 """Mark dest as a copy of source. Unmark dest if source is None."""
427 if source == dest:
438 if source == dest:
428 return
439 return
429 self._dirty = True
440 self._dirty = True
430 if source is not None:
441 if source is not None:
431 self._map.copymap[dest] = source
442 self._map.copymap[dest] = source
432 self._updatedfiles.add(source)
443 self._updatedfiles.add(source)
433 self._updatedfiles.add(dest)
444 self._updatedfiles.add(dest)
434 elif self._map.copymap.pop(dest, None):
445 elif self._map.copymap.pop(dest, None):
435 self._updatedfiles.add(dest)
446 self._updatedfiles.add(dest)
436
447
437 def copied(self, file):
448 def copied(self, file):
438 return self._map.copymap.get(file, None)
449 return self._map.copymap.get(file, None)
439
450
440 def copies(self):
451 def copies(self):
441 return self._map.copymap
452 return self._map.copymap
442
453
+    @requires_parents_change
+    def update_file_reference(
+        self,
+        filename,
+        p1_tracked,
+    ):
+        """Set a file as tracked in the parent (or not)
+
+        This is to be called when adjusting the dirstate to a new parent
+        after a history-rewriting operation.
+
+        It should not be called during a merge (p2 != nullid) and only within
+        a `with dirstate.parentchange():` context.
+        """
+        if self.in_merge:
+            msg = b'update_file_reference should not be called when merging'
+            raise error.ProgrammingError(msg)
+        entry = self._map.get(filename)
+        if entry is None:
+            wc_tracked = False
+        else:
+            wc_tracked = entry.tracked
+        if p1_tracked and wc_tracked:
+            # the underlying reference might have changed, we will have to
+            # check it.
+            self.normallookup(filename)
+        elif not (p1_tracked or wc_tracked):
+            # the file is no longer relevant to anyone
+            self._drop(filename)
+        elif (not p1_tracked) and wc_tracked:
+            if not entry.added:
+                self._add(filename)
+        elif p1_tracked and not wc_tracked:
+            if entry is None or not entry.removed:
+                self._remove(filename)
+        else:
+            assert False, 'unreachable'
+
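For readability, the branch logic of the new method boils down to this truth table; it restates the code above and adds no new behavior:

    p1_tracked  wc_tracked  action
    ----------  ----------  --------------------------------------------
    True        True        normallookup(f)  (recheck the p1 reference)
    False       False       _drop(f)         (no longer relevant to anyone)
    False       True        _add(f)          (unless already "added")
    True        False       _remove(f)       (unless already "removed")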
443 def _addpath(
492 def _addpath(
444 self,
493 self,
445 f,
494 f,
446 mode=0,
495 mode=0,
447 size=None,
496 size=None,
448 mtime=None,
497 mtime=None,
449 added=False,
498 added=False,
450 merged=False,
499 merged=False,
451 from_p2=False,
500 from_p2=False,
452 possibly_dirty=False,
501 possibly_dirty=False,
453 ):
502 ):
454 entry = self._map.get(f)
503 entry = self._map.get(f)
455 if added or entry is not None and entry.removed:
504 if added or entry is not None and entry.removed:
456 scmutil.checkfilename(f)
505 scmutil.checkfilename(f)
457 if self._map.hastrackeddir(f):
506 if self._map.hastrackeddir(f):
458 msg = _(b'directory %r already in dirstate')
507 msg = _(b'directory %r already in dirstate')
459 msg %= pycompat.bytestr(f)
508 msg %= pycompat.bytestr(f)
460 raise error.Abort(msg)
509 raise error.Abort(msg)
461 # shadows
510 # shadows
462 for d in pathutil.finddirs(f):
511 for d in pathutil.finddirs(f):
463 if self._map.hastrackeddir(d):
512 if self._map.hastrackeddir(d):
464 break
513 break
465 entry = self._map.get(d)
514 entry = self._map.get(d)
466 if entry is not None and not entry.removed:
515 if entry is not None and not entry.removed:
467 msg = _(b'file %r in dirstate clashes with %r')
516 msg = _(b'file %r in dirstate clashes with %r')
468 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
517 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
469 raise error.Abort(msg)
518 raise error.Abort(msg)
470 self._dirty = True
519 self._dirty = True
471 self._updatedfiles.add(f)
520 self._updatedfiles.add(f)
472 self._map.addfile(
521 self._map.addfile(
473 f,
522 f,
474 mode=mode,
523 mode=mode,
475 size=size,
524 size=size,
476 mtime=mtime,
525 mtime=mtime,
477 added=added,
526 added=added,
478 merged=merged,
527 merged=merged,
479 from_p2=from_p2,
528 from_p2=from_p2,
480 possibly_dirty=possibly_dirty,
529 possibly_dirty=possibly_dirty,
481 )
530 )
482
531
483 def normal(self, f, parentfiledata=None):
532 def normal(self, f, parentfiledata=None):
484 """Mark a file normal and clean.
533 """Mark a file normal and clean.
485
534
486 parentfiledata: (mode, size, mtime) of the clean file
535 parentfiledata: (mode, size, mtime) of the clean file
487
536
488 parentfiledata should be computed from memory (for mode,
537 parentfiledata should be computed from memory (for mode,
489 size), as or close as possible from the point where we
538 size), as or close as possible from the point where we
490 determined the file was clean, to limit the risk of the
539 determined the file was clean, to limit the risk of the
491 file having been changed by an external process between the
540 file having been changed by an external process between the
492 moment where the file was determined to be clean and now."""
541 moment where the file was determined to be clean and now."""
493 if parentfiledata:
542 if parentfiledata:
494 (mode, size, mtime) = parentfiledata
543 (mode, size, mtime) = parentfiledata
495 else:
544 else:
496 s = os.lstat(self._join(f))
545 s = os.lstat(self._join(f))
497 mode = s.st_mode
546 mode = s.st_mode
498 size = s.st_size
547 size = s.st_size
499 mtime = s[stat.ST_MTIME]
548 mtime = s[stat.ST_MTIME]
500 self._addpath(f, mode=mode, size=size, mtime=mtime)
549 self._addpath(f, mode=mode, size=size, mtime=mtime)
501 self._map.copymap.pop(f, None)
550 self._map.copymap.pop(f, None)
502 if f in self._map.nonnormalset:
551 if f in self._map.nonnormalset:
503 self._map.nonnormalset.remove(f)
552 self._map.nonnormalset.remove(f)
504 if mtime > self._lastnormaltime:
553 if mtime > self._lastnormaltime:
505 # Remember the most recent modification timeslot for status(),
554 # Remember the most recent modification timeslot for status(),
506 # to make sure we won't miss future size-preserving file content
555 # to make sure we won't miss future size-preserving file content
507 # modifications that happen within the same timeslot.
556 # modifications that happen within the same timeslot.
508 self._lastnormaltime = mtime
557 self._lastnormaltime = mtime
509
558
510 def normallookup(self, f):
559 def normallookup(self, f):
511 '''Mark a file normal, but possibly dirty.'''
560 '''Mark a file normal, but possibly dirty.'''
512 if self.in_merge:
561 if self.in_merge:
513 # if there is a merge going on and the file was either
562 # if there is a merge going on and the file was either
514 # "merged" or coming from other parent (-2) before
563 # "merged" or coming from other parent (-2) before
515 # being removed, restore that state.
564 # being removed, restore that state.
516 entry = self._map.get(f)
565 entry = self._map.get(f)
517 if entry is not None:
566 if entry is not None:
518 # XXX this should probably be dealt with a a lower level
567 # XXX this should probably be dealt with a a lower level
519 # (see `merged_removed` and `from_p2_removed`)
568 # (see `merged_removed` and `from_p2_removed`)
520 if entry.merged_removed or entry.from_p2_removed:
569 if entry.merged_removed or entry.from_p2_removed:
521 source = self._map.copymap.get(f)
570 source = self._map.copymap.get(f)
522 if entry.merged_removed:
571 if entry.merged_removed:
523 self.merge(f)
572 self.merge(f)
524 elif entry.from_p2_removed:
573 elif entry.from_p2_removed:
525 self.otherparent(f)
574 self.otherparent(f)
526 if source is not None:
575 if source is not None:
527 self.copy(source, f)
576 self.copy(source, f)
528 return
577 return
529 elif entry.merged or entry.from_p2:
578 elif entry.merged or entry.from_p2:
530 return
579 return
531 self._addpath(f, possibly_dirty=True)
580 self._addpath(f, possibly_dirty=True)
532 self._map.copymap.pop(f, None)
581 self._map.copymap.pop(f, None)
533
582
534 def otherparent(self, f):
583 def otherparent(self, f):
535 '''Mark as coming from the other parent, always dirty.'''
584 '''Mark as coming from the other parent, always dirty.'''
536 if not self.in_merge:
585 if not self.in_merge:
537 msg = _(b"setting %r to other parent only allowed in merges") % f
586 msg = _(b"setting %r to other parent only allowed in merges") % f
538 raise error.Abort(msg)
587 raise error.Abort(msg)
539 entry = self._map.get(f)
588 entry = self._map.get(f)
540 if entry is not None and entry.tracked:
589 if entry is not None and entry.tracked:
541 # merge-like
590 # merge-like
542 self._addpath(f, merged=True)
591 self._addpath(f, merged=True)
543 else:
592 else:
544 # add-like
593 # add-like
545 self._addpath(f, from_p2=True)
594 self._addpath(f, from_p2=True)
546 self._map.copymap.pop(f, None)
595 self._map.copymap.pop(f, None)
547
596
548 def add(self, f):
597 def add(self, f):
549 '''Mark a file added.'''
598 '''Mark a file added.'''
550 self._add(f)
599 self._add(f)
551
600
552 def _add(self, filename):
601 def _add(self, filename):
553 """internal function to mark a file as added"""
602 """internal function to mark a file as added"""
554 self._addpath(filename, added=True)
603 self._addpath(filename, added=True)
555 self._map.copymap.pop(filename, None)
604 self._map.copymap.pop(filename, None)
556
605
557 def remove(self, f):
606 def remove(self, f):
558 '''Mark a file removed'''
607 '''Mark a file removed'''
559 self._remove(f)
608 self._remove(f)
560
609
561 def _remove(self, filename):
610 def _remove(self, filename):
562 """internal function to mark a file removed"""
611 """internal function to mark a file removed"""
563 self._dirty = True
612 self._dirty = True
564 self._updatedfiles.add(filename)
613 self._updatedfiles.add(filename)
565 self._map.removefile(filename, in_merge=self.in_merge)
614 self._map.removefile(filename, in_merge=self.in_merge)
566
615
567 def merge(self, f):
616 def merge(self, f):
568 '''Mark a file merged.'''
617 '''Mark a file merged.'''
569 if not self.in_merge:
618 if not self.in_merge:
570 return self.normallookup(f)
619 return self.normallookup(f)
571 return self.otherparent(f)
620 return self.otherparent(f)
572
621
573 def drop(self, f):
622 def drop(self, f):
574 '''Drop a file from the dirstate'''
623 '''Drop a file from the dirstate'''
575 self._drop(f)
624 self._drop(f)
576
625
577 def _drop(self, filename):
626 def _drop(self, filename):
578 """internal function to drop a file from the dirstate"""
627 """internal function to drop a file from the dirstate"""
579 if self._map.dropfile(filename):
628 if self._map.dropfile(filename):
580 self._dirty = True
629 self._dirty = True
581 self._updatedfiles.add(filename)
630 self._updatedfiles.add(filename)
582 self._map.copymap.pop(filename, None)
631 self._map.copymap.pop(filename, None)
583
632
584 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
633 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
585 if exists is None:
634 if exists is None:
586 exists = os.path.lexists(os.path.join(self._root, path))
635 exists = os.path.lexists(os.path.join(self._root, path))
587 if not exists:
636 if not exists:
588 # Maybe a path component exists
637 # Maybe a path component exists
589 if not ignoremissing and b'/' in path:
638 if not ignoremissing and b'/' in path:
590 d, f = path.rsplit(b'/', 1)
639 d, f = path.rsplit(b'/', 1)
591 d = self._normalize(d, False, ignoremissing, None)
640 d = self._normalize(d, False, ignoremissing, None)
592 folded = d + b"/" + f
641 folded = d + b"/" + f
593 else:
642 else:
594 # No path components, preserve original case
643 # No path components, preserve original case
595 folded = path
644 folded = path
596 else:
645 else:
597 # recursively normalize leading directory components
646 # recursively normalize leading directory components
598 # against dirstate
647 # against dirstate
599 if b'/' in normed:
648 if b'/' in normed:
600 d, f = normed.rsplit(b'/', 1)
649 d, f = normed.rsplit(b'/', 1)
601 d = self._normalize(d, False, ignoremissing, True)
650 d = self._normalize(d, False, ignoremissing, True)
602 r = self._root + b"/" + d
651 r = self._root + b"/" + d
603 folded = d + b"/" + util.fspath(f, r)
652 folded = d + b"/" + util.fspath(f, r)
604 else:
653 else:
605 folded = util.fspath(normed, self._root)
654 folded = util.fspath(normed, self._root)
606 storemap[normed] = folded
655 storemap[normed] = folded
607
656
608 return folded
657 return folded
609
658
610 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
659 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
611 normed = util.normcase(path)
660 normed = util.normcase(path)
612 folded = self._map.filefoldmap.get(normed, None)
661 folded = self._map.filefoldmap.get(normed, None)
613 if folded is None:
662 if folded is None:
614 if isknown:
663 if isknown:
615 folded = path
664 folded = path
616 else:
665 else:
617 folded = self._discoverpath(
666 folded = self._discoverpath(
618 path, normed, ignoremissing, exists, self._map.filefoldmap
667 path, normed, ignoremissing, exists, self._map.filefoldmap
619 )
668 )
620 return folded
669 return folded
621
670
622 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
671 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
623 normed = util.normcase(path)
672 normed = util.normcase(path)
624 folded = self._map.filefoldmap.get(normed, None)
673 folded = self._map.filefoldmap.get(normed, None)
625 if folded is None:
674 if folded is None:
626 folded = self._map.dirfoldmap.get(normed, None)
675 folded = self._map.dirfoldmap.get(normed, None)
627 if folded is None:
676 if folded is None:
628 if isknown:
677 if isknown:
629 folded = path
678 folded = path
630 else:
679 else:
631 # store discovered result in dirfoldmap so that future
680 # store discovered result in dirfoldmap so that future
632 # normalizefile calls don't start matching directories
681 # normalizefile calls don't start matching directories
633 folded = self._discoverpath(
682 folded = self._discoverpath(
634 path, normed, ignoremissing, exists, self._map.dirfoldmap
683 path, normed, ignoremissing, exists, self._map.dirfoldmap
635 )
684 )
636 return folded
685 return folded
637
686
638 def normalize(self, path, isknown=False, ignoremissing=False):
687 def normalize(self, path, isknown=False, ignoremissing=False):
639 """
688 """
640 normalize the case of a pathname when on a casefolding filesystem
689 normalize the case of a pathname when on a casefolding filesystem
641
690
642 isknown specifies whether the filename came from walking the
691 isknown specifies whether the filename came from walking the
643 disk, to avoid extra filesystem access.
692 disk, to avoid extra filesystem access.
644
693
645 If ignoremissing is True, missing path are returned
694 If ignoremissing is True, missing path are returned
646 unchanged. Otherwise, we try harder to normalize possibly
695 unchanged. Otherwise, we try harder to normalize possibly
647 existing path components.
696 existing path components.
648
697
649 The normalized case is determined based on the following precedence:
698 The normalized case is determined based on the following precedence:
650
699
651 - version of name already stored in the dirstate
700 - version of name already stored in the dirstate
652 - version of name stored on disk
701 - version of name stored on disk
653 - version provided via command arguments
702 - version provided via command arguments
654 """
703 """
655
704
656 if self._checkcase:
705 if self._checkcase:
657 return self._normalize(path, isknown, ignoremissing)
706 return self._normalize(path, isknown, ignoremissing)
658 return path
707 return path
659
708
660 def clear(self):
709 def clear(self):
661 self._map.clear()
710 self._map.clear()
662 self._lastnormaltime = 0
711 self._lastnormaltime = 0
663 self._updatedfiles.clear()
712 self._updatedfiles.clear()
664 self._dirty = True
713 self._dirty = True
665
714
666 def rebuild(self, parent, allfiles, changedfiles=None):
715 def rebuild(self, parent, allfiles, changedfiles=None):
667 if changedfiles is None:
716 if changedfiles is None:
668 # Rebuild entire dirstate
717 # Rebuild entire dirstate
669 to_lookup = allfiles
718 to_lookup = allfiles
670 to_drop = []
719 to_drop = []
671 lastnormaltime = self._lastnormaltime
720 lastnormaltime = self._lastnormaltime
672 self.clear()
721 self.clear()
673 self._lastnormaltime = lastnormaltime
722 self._lastnormaltime = lastnormaltime
674 elif len(changedfiles) < 10:
723 elif len(changedfiles) < 10:
675 # Avoid turning allfiles into a set, which can be expensive if it's
724 # Avoid turning allfiles into a set, which can be expensive if it's
676 # large.
725 # large.
677 to_lookup = []
726 to_lookup = []
678 to_drop = []
727 to_drop = []
679 for f in changedfiles:
728 for f in changedfiles:
680 if f in allfiles:
729 if f in allfiles:
681 to_lookup.append(f)
730 to_lookup.append(f)
682 else:
731 else:
683 to_drop.append(f)
732 to_drop.append(f)
684 else:
733 else:
685 changedfilesset = set(changedfiles)
734 changedfilesset = set(changedfiles)
686 to_lookup = changedfilesset & set(allfiles)
735 to_lookup = changedfilesset & set(allfiles)
687 to_drop = changedfilesset - to_lookup
736 to_drop = changedfilesset - to_lookup
688
737
689 if self._origpl is None:
738 if self._origpl is None:
690 self._origpl = self._pl
739 self._origpl = self._pl
691 self._map.setparents(parent, self._nodeconstants.nullid)
740 self._map.setparents(parent, self._nodeconstants.nullid)
692
741
693 for f in to_lookup:
742 for f in to_lookup:
694 self.normallookup(f)
743 self.normallookup(f)
695 for f in to_drop:
744 for f in to_drop:
696 self._drop(f)
745 self._drop(f)
697
746
698 self._dirty = True
747 self._dirty = True
699
748
700 def identity(self):
749 def identity(self):
701 """Return identity of dirstate itself to detect changing in storage
750 """Return identity of dirstate itself to detect changing in storage
702
751
703 If identity of previous dirstate is equal to this, writing
752 If identity of previous dirstate is equal to this, writing
704 changes based on the former dirstate out can keep consistency.
753 changes based on the former dirstate out can keep consistency.
705 """
754 """
706 return self._map.identity
755 return self._map.identity
707
756
708 def write(self, tr):
757 def write(self, tr):
709 if not self._dirty:
758 if not self._dirty:
710 return
759 return
711
760
712 filename = self._filename
761 filename = self._filename
713 if tr:
762 if tr:
714 # 'dirstate.write()' is not only for writing in-memory
763 # 'dirstate.write()' is not only for writing in-memory
715 # changes out, but also for dropping ambiguous timestamp.
764 # changes out, but also for dropping ambiguous timestamp.
716 # delayed writing re-raise "ambiguous timestamp issue".
765 # delayed writing re-raise "ambiguous timestamp issue".
717 # See also the wiki page below for detail:
766 # See also the wiki page below for detail:
718 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
767 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
719
768
720 # emulate dropping timestamp in 'parsers.pack_dirstate'
769 # emulate dropping timestamp in 'parsers.pack_dirstate'
721 now = _getfsnow(self._opener)
770 now = _getfsnow(self._opener)
722 self._map.clearambiguoustimes(self._updatedfiles, now)
771 self._map.clearambiguoustimes(self._updatedfiles, now)
723
772
724 # emulate that all 'dirstate.normal' results are written out
773 # emulate that all 'dirstate.normal' results are written out
725 self._lastnormaltime = 0
774 self._lastnormaltime = 0
726 self._updatedfiles.clear()
775 self._updatedfiles.clear()
727
776
728 # delay writing in-memory changes out
777 # delay writing in-memory changes out
729 tr.addfilegenerator(
778 tr.addfilegenerator(
730 b'dirstate',
779 b'dirstate',
731 (self._filename,),
780 (self._filename,),
732 self._writedirstate,
781 self._writedirstate,
733 location=b'plain',
782 location=b'plain',
734 )
783 )
735 return
784 return
736
785
737 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
786 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
738 self._writedirstate(st)
787 self._writedirstate(st)
739
788
740 def addparentchangecallback(self, category, callback):
789 def addparentchangecallback(self, category, callback):
741 """add a callback to be called when the wd parents are changed
790 """add a callback to be called when the wd parents are changed
742
791
743 Callback will be called with the following arguments:
792 Callback will be called with the following arguments:
744 dirstate, (oldp1, oldp2), (newp1, newp2)
793 dirstate, (oldp1, oldp2), (newp1, newp2)
745
794
746 Category is a unique identifier to allow overwriting an old callback
795 Category is a unique identifier to allow overwriting an old callback
747 with a newer callback.
796 with a newer callback.
748 """
797 """
749 self._plchangecallbacks[category] = callback
798 self._plchangecallbacks[category] = callback
750
799
751 def _writedirstate(self, st):
800 def _writedirstate(self, st):
752 # notify callbacks about parents change
801 # notify callbacks about parents change
753 if self._origpl is not None and self._origpl != self._pl:
802 if self._origpl is not None and self._origpl != self._pl:
754 for c, callback in sorted(
803 for c, callback in sorted(
755 pycompat.iteritems(self._plchangecallbacks)
804 pycompat.iteritems(self._plchangecallbacks)
756 ):
805 ):
757 callback(self, self._origpl, self._pl)
806 callback(self, self._origpl, self._pl)
758 self._origpl = None
807 self._origpl = None
759 # use the modification time of the newly created temporary file as the
808 # use the modification time of the newly created temporary file as the
760 # filesystem's notion of 'now'
809 # filesystem's notion of 'now'
761 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
810 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
762
811
763 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
812 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
764 # timestamp of each entries in dirstate, because of 'now > mtime'
813 # timestamp of each entries in dirstate, because of 'now > mtime'
765 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
814 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
766 if delaywrite > 0:
815 if delaywrite > 0:
767 # do we have any files to delay for?
816 # do we have any files to delay for?
768 for f, e in pycompat.iteritems(self._map):
817 for f, e in pycompat.iteritems(self._map):
769 if e.need_delay(now):
818 if e.need_delay(now):
770 import time # to avoid useless import
819 import time # to avoid useless import
771
820
772 # rather than sleep n seconds, sleep until the next
821 # rather than sleep n seconds, sleep until the next
773 # multiple of n seconds
822 # multiple of n seconds
774 clock = time.time()
823 clock = time.time()
775 start = int(clock) - (int(clock) % delaywrite)
824 start = int(clock) - (int(clock) % delaywrite)
776 end = start + delaywrite
825 end = start + delaywrite
777 time.sleep(end - clock)
826 time.sleep(end - clock)
778 now = end # trust our estimate that the end is near now
827 now = end # trust our estimate that the end is near now
779 break
828 break
780
829
781 self._map.write(st, now)
830 self._map.write(st, now)
782 self._lastnormaltime = 0
831 self._lastnormaltime = 0
783 self._dirty = False
832 self._dirty = False
784
833
785 def _dirignore(self, f):
834 def _dirignore(self, f):
786 if self._ignore(f):
835 if self._ignore(f):
787 return True
836 return True
788 for p in pathutil.finddirs(f):
837 for p in pathutil.finddirs(f):
789 if self._ignore(p):
838 if self._ignore(p):
790 return True
839 return True
791 return False
840 return False
792
841
793 def _ignorefiles(self):
842 def _ignorefiles(self):
794 files = []
843 files = []
795 if os.path.exists(self._join(b'.hgignore')):
844 if os.path.exists(self._join(b'.hgignore')):
796 files.append(self._join(b'.hgignore'))
845 files.append(self._join(b'.hgignore'))
797 for name, path in self._ui.configitems(b"ui"):
846 for name, path in self._ui.configitems(b"ui"):
798 if name == b'ignore' or name.startswith(b'ignore.'):
847 if name == b'ignore' or name.startswith(b'ignore.'):
799 # we need to use os.path.join here rather than self._join
848 # we need to use os.path.join here rather than self._join
800 # because path is arbitrary and user-specified
849 # because path is arbitrary and user-specified
801 files.append(os.path.join(self._rootdir, util.expandpath(path)))
850 files.append(os.path.join(self._rootdir, util.expandpath(path)))
802 return files
851 return files
803
852
804 def _ignorefileandline(self, f):
853 def _ignorefileandline(self, f):
805 files = collections.deque(self._ignorefiles())
854 files = collections.deque(self._ignorefiles())
806 visited = set()
855 visited = set()
807 while files:
856 while files:
808 i = files.popleft()
857 i = files.popleft()
809 patterns = matchmod.readpatternfile(
858 patterns = matchmod.readpatternfile(
810 i, self._ui.warn, sourceinfo=True
859 i, self._ui.warn, sourceinfo=True
811 )
860 )
812 for pattern, lineno, line in patterns:
861 for pattern, lineno, line in patterns:
813 kind, p = matchmod._patsplit(pattern, b'glob')
862 kind, p = matchmod._patsplit(pattern, b'glob')
814 if kind == b"subinclude":
863 if kind == b"subinclude":
815 if p not in visited:
864 if p not in visited:
816 files.append(p)
865 files.append(p)
817 continue
866 continue
818 m = matchmod.match(
867 m = matchmod.match(
819 self._root, b'', [], [pattern], warn=self._ui.warn
868 self._root, b'', [], [pattern], warn=self._ui.warn
820 )
869 )
821 if m(f):
870 if m(f):
822 return (i, lineno, line)
871 return (i, lineno, line)
823 visited.add(i)
872 visited.add(i)
824 return (None, -1, b"")
873 return (None, -1, b"")
825
874
826 def _walkexplicit(self, match, subrepos):
875 def _walkexplicit(self, match, subrepos):
827 """Get stat data about the files explicitly specified by match.
876 """Get stat data about the files explicitly specified by match.
828
877
829 Return a triple (results, dirsfound, dirsnotfound).
878 Return a triple (results, dirsfound, dirsnotfound).
830 - results is a mapping from filename to stat result. It also contains
879 - results is a mapping from filename to stat result. It also contains
831 listings mapping subrepos and .hg to None.
880 listings mapping subrepos and .hg to None.
832 - dirsfound is a list of files found to be directories.
881 - dirsfound is a list of files found to be directories.
833 - dirsnotfound is a list of files that the dirstate thinks are
882 - dirsnotfound is a list of files that the dirstate thinks are
834 directories and that were not found."""
883 directories and that were not found."""
835
884
836 def badtype(mode):
885 def badtype(mode):
837 kind = _(b'unknown')
886 kind = _(b'unknown')
838 if stat.S_ISCHR(mode):
887 if stat.S_ISCHR(mode):
839 kind = _(b'character device')
888 kind = _(b'character device')
840 elif stat.S_ISBLK(mode):
889 elif stat.S_ISBLK(mode):
841 kind = _(b'block device')
890 kind = _(b'block device')
842 elif stat.S_ISFIFO(mode):
891 elif stat.S_ISFIFO(mode):
843 kind = _(b'fifo')
892 kind = _(b'fifo')
844 elif stat.S_ISSOCK(mode):
893 elif stat.S_ISSOCK(mode):
845 kind = _(b'socket')
894 kind = _(b'socket')
846 elif stat.S_ISDIR(mode):
895 elif stat.S_ISDIR(mode):
847 kind = _(b'directory')
896 kind = _(b'directory')
848 return _(b'unsupported file type (type is %s)') % kind
897 return _(b'unsupported file type (type is %s)') % kind
849
898
850 badfn = match.bad
899 badfn = match.bad
851 dmap = self._map
900 dmap = self._map
852 lstat = os.lstat
901 lstat = os.lstat
853 getkind = stat.S_IFMT
902 getkind = stat.S_IFMT
854 dirkind = stat.S_IFDIR
903 dirkind = stat.S_IFDIR
855 regkind = stat.S_IFREG
904 regkind = stat.S_IFREG
856 lnkkind = stat.S_IFLNK
905 lnkkind = stat.S_IFLNK
857 join = self._join
906 join = self._join
858 dirsfound = []
907 dirsfound = []
859 foundadd = dirsfound.append
908 foundadd = dirsfound.append
860 dirsnotfound = []
909 dirsnotfound = []
861 notfoundadd = dirsnotfound.append
910 notfoundadd = dirsnotfound.append
862
911
863 if not match.isexact() and self._checkcase:
912 if not match.isexact() and self._checkcase:
864 normalize = self._normalize
913 normalize = self._normalize
865 else:
914 else:
866 normalize = None
915 normalize = None
867
916
868 files = sorted(match.files())
917 files = sorted(match.files())
869 subrepos.sort()
918 subrepos.sort()
870 i, j = 0, 0
919 i, j = 0, 0
871 while i < len(files) and j < len(subrepos):
920 while i < len(files) and j < len(subrepos):
872 subpath = subrepos[j] + b"/"
921 subpath = subrepos[j] + b"/"
873 if files[i] < subpath:
922 if files[i] < subpath:
874 i += 1
923 i += 1
875 continue
924 continue
876 while i < len(files) and files[i].startswith(subpath):
925 while i < len(files) and files[i].startswith(subpath):
877 del files[i]
926 del files[i]
878 j += 1
927 j += 1
879
928
880 if not files or b'' in files:
929 if not files or b'' in files:
881 files = [b'']
930 files = [b'']
882 # constructing the foldmap is expensive, so don't do it for the
931 # constructing the foldmap is expensive, so don't do it for the
883 # common case where files is ['']
932 # common case where files is ['']
884 normalize = None
933 normalize = None
885 results = dict.fromkeys(subrepos)
934 results = dict.fromkeys(subrepos)
886 results[b'.hg'] = None
935 results[b'.hg'] = None
887
936
888 for ff in files:
937 for ff in files:
889 if normalize:
938 if normalize:
890 nf = normalize(ff, False, True)
939 nf = normalize(ff, False, True)
891 else:
940 else:
892 nf = ff
941 nf = ff
893 if nf in results:
942 if nf in results:
894 continue
943 continue
895
944
896 try:
945 try:
897 st = lstat(join(nf))
946 st = lstat(join(nf))
898 kind = getkind(st.st_mode)
947 kind = getkind(st.st_mode)
899 if kind == dirkind:
948 if kind == dirkind:
900 if nf in dmap:
949 if nf in dmap:
901 # file replaced by dir on disk but still in dirstate
950 # file replaced by dir on disk but still in dirstate
902 results[nf] = None
951 results[nf] = None
903 foundadd((nf, ff))
952 foundadd((nf, ff))
904 elif kind == regkind or kind == lnkkind:
953 elif kind == regkind or kind == lnkkind:
905 results[nf] = st
954 results[nf] = st
906 else:
955 else:
907 badfn(ff, badtype(kind))
956 badfn(ff, badtype(kind))
908 if nf in dmap:
957 if nf in dmap:
909 results[nf] = None
958 results[nf] = None
910 except OSError as inst: # nf not found on disk - it is dirstate only
959 except OSError as inst: # nf not found on disk - it is dirstate only
911 if nf in dmap: # does it exactly match a missing file?
960 if nf in dmap: # does it exactly match a missing file?
912 results[nf] = None
961 results[nf] = None
913 else: # does it match a missing directory?
962 else: # does it match a missing directory?
914 if self._map.hasdir(nf):
963 if self._map.hasdir(nf):
915 notfoundadd(nf)
964 notfoundadd(nf)
916 else:
965 else:
917 badfn(ff, encoding.strtolocal(inst.strerror))
966 badfn(ff, encoding.strtolocal(inst.strerror))
918
967
919 # match.files() may contain explicitly-specified paths that shouldn't
968 # match.files() may contain explicitly-specified paths that shouldn't
920 # be taken; drop them from the list of files found. dirsfound/notfound
969 # be taken; drop them from the list of files found. dirsfound/notfound
921 # aren't filtered here because they will be tested later.
970 # aren't filtered here because they will be tested later.
922 if match.anypats():
971 if match.anypats():
923 for f in list(results):
972 for f in list(results):
924 if f == b'.hg' or f in subrepos:
973 if f == b'.hg' or f in subrepos:
925 # keep sentinel to disable further out-of-repo walks
974 # keep sentinel to disable further out-of-repo walks
926 continue
975 continue
927 if not match(f):
976 if not match(f):
928 del results[f]
977 del results[f]
929
978
930 # Case insensitive filesystems cannot rely on lstat() failing to detect
979 # Case insensitive filesystems cannot rely on lstat() failing to detect
931 # a case-only rename. Prune the stat object for any file that does not
980 # a case-only rename. Prune the stat object for any file that does not
932 # match the case in the filesystem, if there are multiple files that
981 # match the case in the filesystem, if there are multiple files that
933 # normalize to the same path.
982 # normalize to the same path.
934 if match.isexact() and self._checkcase:
983 if match.isexact() and self._checkcase:
935 normed = {}
984 normed = {}
936
985
937 for f, st in pycompat.iteritems(results):
986 for f, st in pycompat.iteritems(results):
938 if st is None:
987 if st is None:
939 continue
988 continue
940
989
941 nc = util.normcase(f)
990 nc = util.normcase(f)
942 paths = normed.get(nc)
991 paths = normed.get(nc)
943
992
944 if paths is None:
993 if paths is None:
945 paths = set()
994 paths = set()
946 normed[nc] = paths
995 normed[nc] = paths
947
996
948 paths.add(f)
997 paths.add(f)
949
998
950 for norm, paths in pycompat.iteritems(normed):
999 for norm, paths in pycompat.iteritems(normed):
951 if len(paths) > 1:
1000 if len(paths) > 1:
952 for path in paths:
1001 for path in paths:
953 folded = self._discoverpath(
1002 folded = self._discoverpath(
954 path, norm, True, None, self._map.dirfoldmap
1003 path, norm, True, None, self._map.dirfoldmap
955 )
1004 )
956 if path != folded:
1005 if path != folded:
957 results[path] = None
1006 results[path] = None
958
1007
959 return results, dirsfound, dirsnotfound
1008 return results, dirsfound, dirsnotfound
960
1009
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn, b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that weren't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results

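The three-step walk above is the engine behind status(); from the outside only the returned dict matters. A minimal sketch of driving it directly, assuming an already-loaded repository object `repo` (hypothetical, not part of this changeset) and the module's existing imports:

def example_walk(repo):
    # unknown=True, ignored=False mirrors what a plain `hg status` needs
    m = matchmod.always()
    for fn, st in pycompat.iteritems(
        repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
    ):
        if st is None:
            pass  # known to the dirstate but not stat'ed (missing/ignored)
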
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._rustmap,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)

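Both knobs consulted above come from the normal configuration mechanism; for reference, a hypothetical hgrc excerpt that this code would honor (values illustrative):

    [worker]
    # forwarded to the Rust extension via RAYON_NUM_THREADS
    numcpus = 4
    # when False, the Rust status runs single-threaded
    enabled = True
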
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                pass

        def noop(f):
            pass

        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
            mode = t.mode
            size = t.size
            time = t.mtime

            if not st and t.tracked:
                dadd(fn)
            elif t.merged:
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or t.from_p2
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)

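On the caller's side, the first element of the returned pair still needs a content check. A rough sketch, assuming a loaded `repo` (hypothetical); the comparison against the parent revision is illustrative, not the exact logic used by workingctx:

def example_status(repo):
    m = matchmod.always()
    unsure, st = repo.dirstate.status(
        m, subrepos=[], ignored=False, clean=False, unknown=True
    )
    for fn in unsure:
        # same size but unreliable mtime: only the contents can decide
        if repo[b'.'][fn].data() != repo.wvfs.read(fn):
            st.modified.append(fn)
    return st
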
    def matches(self, match):
        """
        return files in the dirstate (in whatever state) filtered by match
        """
        dmap = self._map
        if rustmod is not None:
            dmap = self._map._rustmap

        if match.always():
            return dmap.keys()
        files = match.files()
        if match.isexact():
            # fast path -- filter the other way around, since typically files is
            # much smaller than dmap
            return [f for f in files if f in dmap]
        if match.prefix() and all(fn in dmap for fn in files):
            # fast path -- all the values are known to be files, so just return
            # that
            return list(files)
        return [f for f in dmap if match(f)]

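For example (illustrative, assuming `repo`), an exact matcher takes the first fast path and never iterates the full map:

def example_matches(repo):
    m = matchmod.exact([b'setup.py', b'README'])
    return repo.dirstate.matches(m)  # only the subset actually in the dirstate
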
    def _actualfilename(self, tr):
        if tr:
            return self._pendingfilename
        else:
            return self._filename

    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file'''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to make sure changes are
        # written out, because the latter skips writing while a transaction
        # is running. The file written here is then used to create the
        # backup of the dirstate.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                self._opener(filename, b"w", atomictemp=True, checkambig=True)
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                self._writedirstate,
                location=b'plain',
            )

            # ensure that the pending file written above is unlinked on
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )

    def restorebackup(self, tr, backupname):
        '''Restore dirstate from backup file'''
        # this "invalidate()" prevents "wlock.release()" from writing
        # changes of dirstate out after restoring from backup file
        self.invalidate()
        filename = self._actualfilename(tr)
        o = self._opener
        if util.samefile(o.join(backupname), o.join(filename)):
            o.unlink(backupname)
        else:
            o.rename(backupname, filename, checkambig=True)

    def clearbackup(self, tr, backupname):
        '''Clear backup file'''
        self._opener.unlink(backupname)
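
The three methods above are meant to bracket a risky working-copy operation; a sketch of the intended pattern, with a hypothetical backup name and transaction `tr`:

def example_backup(repo, tr):
    backup = b'dirstate.example-backup'
    repo.dirstate.savebackup(tr, backup)
    try:
        pass  # ... mutate the working copy here ...
    except Exception:
        repo.dirstate.restorebackup(tr, backup)
        raise
    else:
        repo.dirstate.clearbackup(tr, backup)
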
@@ -1,2299 +1,2289
# scmutil.py - Mercurial core utility functions
#
# Copyright Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import glob
import os
import posixpath
import re
import subprocess
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullrev,
    short,
    wdirrev,
)
from .pycompat import getattr
from .thirdparty import attr
from . import (
    copies as copiesmod,
    encoding,
    error,
    match as matchmod,
    obsolete,
    obsutil,
    pathutil,
    phases,
    policy,
    pycompat,
    requirements as requirementsmod,
    revsetlang,
    similar,
    smartset,
    url,
    util,
    vfs,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
)

if pycompat.iswindows:
    from . import scmwindows as scmplatform
else:
    from . import scmposix as scmplatform

parsers = policy.importmod('parsers')
rustrevlog = policy.importrust('revlog')

termsize = scmplatform.termsize


@attr.s(slots=True, repr=False)
class status(object):
    """Struct with a list of files per status.

    The 'deleted', 'unknown' and 'ignored' properties are only
    relevant to the working copy.
    """

    modified = attr.ib(default=attr.Factory(list))
    added = attr.ib(default=attr.Factory(list))
    removed = attr.ib(default=attr.Factory(list))
    deleted = attr.ib(default=attr.Factory(list))
    unknown = attr.ib(default=attr.Factory(list))
    ignored = attr.ib(default=attr.Factory(list))
    clean = attr.ib(default=attr.Factory(list))

    def __iter__(self):
        yield self.modified
        yield self.added
        yield self.removed
        yield self.deleted
        yield self.unknown
        yield self.ignored
        yield self.clean

    def __repr__(self):
        return (
            r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
            r'unknown=%s, ignored=%s, clean=%s>'
        ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)


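Since __iter__ yields the seven lists in a fixed order, a status object unpacks like the historical 7-tuple; a quick illustration:

st = status(modified=[b'a.txt'], added=[b'b.txt'])
modified, added, removed, deleted, unknown, ignored, clean = st
assert modified == [b'a.txt'] and clean == []
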
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    for subpath, ctx in sorted(pycompat.iteritems(subpaths)):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)


def nochangesfound(ui, repo, excluded=None):
    """Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    """
    secretlist = []
    if excluded:
        for n in excluded:
            ctx = repo[n]
            if ctx.phase() >= phases.secret and not ctx.extinct():
                secretlist.append(n)

    if secretlist:
        ui.status(
            _(b"no changes found (ignored %d secret changesets)\n")
            % len(secretlist)
        )
    else:
        ui.status(_(b"no changes found\n"))


def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.
    """
    coarse_exit_code = -1
    detailed_exit_code = -1
    try:
        try:
            return func()
        except:  # re-raises
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        detailed_exit_code = 20
        if inst.errno == errno.ETIMEDOUT:
            reason = _(b'timed out waiting for lock held by %r') % (
                pycompat.bytestr(inst.locker)
            )
        else:
            reason = _(b'lock held by %r') % inst.locker
        ui.error(
            _(b"abort: %s: %s\n")
            % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
        )
        if not inst.locker:
            ui.error(_(b"(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        detailed_exit_code = 20
        ui.error(
            _(b"abort: could not lock %s: %s\n")
            % (
                inst.desc or stringutil.forcebytestr(inst.filename),
                encoding.strtolocal(inst.strerror),
            )
        )
    except error.RepoError as inst:
        ui.error(_(b"abort: %s\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.error(_(b"abort: %s") % inst.args[0])
        msg = inst.args[1]
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
        if msg is None:
            ui.error(b"\n")
        elif not isinstance(msg, bytes):
            ui.error(b" %r\n" % (msg,))
        elif not msg:
            ui.error(_(b" empty string\n"))
        else:
            ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
    except error.CensoredNodeError as inst:
        ui.error(_(b"abort: file censored %s\n") % inst)
    except error.WdirUnsupported:
        ui.error(_(b"abort: working directory revision cannot be specified\n"))
    except error.Error as inst:
        if inst.detailed_exit_code is not None:
            detailed_exit_code = inst.detailed_exit_code
        if inst.coarse_exit_code is not None:
            coarse_exit_code = inst.coarse_exit_code
        ui.error(inst.format())
    except error.WorkerError as inst:
        # Don't print a message -- the worker already should have
        return inst.status_code
    except ImportError as inst:
        ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
        m = stringutil.forcebytestr(inst).split()[-1]
        if m in b"mpatch bdiff".split():
            ui.error(_(b"(did you forget to compile extensions?)\n"))
        elif m in b"zlib".split():
            ui.error(_(b"(is your Python install correct?)\n"))
    except util.urlerr.httperror as inst:
        detailed_exit_code = 100
        ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
    except util.urlerr.urlerror as inst:
        detailed_exit_code = 100
        try:  # usually it is in the form (errno, strerror)
            reason = inst.reason.args[1]
        except (AttributeError, IndexError):
            # it might be anything, for example a string
            reason = inst.reason
        if isinstance(reason, pycompat.unicode):
            # SSLError of Python 2.7.9 contains a unicode
            reason = encoding.unitolocal(reason)
        ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
    except (IOError, OSError) as inst:
        if (
            util.safehasattr(inst, b"args")
            and inst.args
            and inst.args[0] == errno.EPIPE
        ):
            pass
        elif getattr(inst, "strerror", None):  # common IOError or OSError
            if getattr(inst, "filename", None) is not None:
                ui.error(
                    _(b"abort: %s: '%s'\n")
                    % (
                        encoding.strtolocal(inst.strerror),
                        stringutil.forcebytestr(inst.filename),
                    )
                )
            else:
                ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
        else:  # suspicious IOError
            raise
    except MemoryError:
        ui.error(_(b"abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case, catch this and pass the exit code to the caller.
        detailed_exit_code = 254
        coarse_exit_code = inst.code

    if ui.configbool(b'ui', b'detailed-exit-code'):
        return detailed_exit_code
    else:
        return coarse_exit_code


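A sketch of wrapping an arbitrary operation, assuming a configured `ui` object (hypothetical); the integer returned is suitable for sys.exit():

def example_callcatch(ui, repo):
    def _operation():
        checknewlabel(repo, b'feature-x', b'bookmark')
        return 0

    # 0 on success; a coarse or detailed exit code if an exception was handled
    return callcatch(ui, _operation)
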
def checknewlabel(repo, lbl, kind):
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in [b'tip', b'.', b'null']:
        raise error.InputError(_(b"the name '%s' is reserved") % lbl)
    for c in (b':', b'\0', b'\n', b'\r'):
        if c in lbl:
            raise error.InputError(
                _(b"%r cannot be used in a name") % pycompat.bytestr(c)
            )
    try:
        int(lbl)
        raise error.InputError(_(b"cannot use an integer as a name"))
    except ValueError:
        pass
    if lbl.strip() != lbl:
        raise error.InputError(
            _(b"leading or trailing whitespace in name %r") % lbl
        )


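Concretely, the guards above reject, in order: reserved names, forbidden bytes, pure integers, and stray whitespace. Illustrative calls, each raising error.InputError for a hypothetical `repo`:

# checknewlabel(repo, b'tip', b'bookmark')   # reserved name
# checknewlabel(repo, b'a:b', b'bookmark')   # b':' is forbidden
# checknewlabel(repo, b'123', b'bookmark')   # integers look like revnums
# checknewlabel(repo, b' x ', b'bookmark')   # leading/trailing whitespace
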
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if b'\r' in f or b'\n' in f:
        raise error.InputError(
            _(b"'\\n' and '\\r' disallowed in filenames: %r")
            % pycompat.bytestr(f)
        )


def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if abort or warn:
        msg = util.checkwinfilename(f)
        if msg:
            msg = b"%s: %s" % (msg, procutil.shellquote(f))
            if abort:
                raise error.InputError(msg)
            ui.warn(_(b"warning: %s\n") % msg)


def checkportabilityalert(ui):
    """check if the user's config requests nothing, a warning, or abort for
    non-portable filenames"""
    val = ui.config(b'ui', b'portablefilenames')
    lval = val.lower()
    bval = stringutil.parsebool(val)
    abort = pycompat.iswindows or lval == b'abort'
    warn = bval or lval == b'warn'
    if bval is None and not (warn or abort or lval == b'ignore'):
        raise error.ConfigError(
            _(b"ui.portablefilenames value is invalid ('%s')") % val
        )
    return abort, warn


class casecollisionauditor(object):
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        allfiles = b'\0'.join(dirstate)
        self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _(b'possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_(b"warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)


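A sketch of how a command adding files might use the auditor (assuming `repo`; with abort=False a collision only warns):

def example_audit(ui, repo, files):
    audit = casecollisionauditor(ui, False, repo.dirstate)
    for f in files:
        audit(f)  # warns if f collides case-insensitively with a known file
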
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    key = cl._filteredrevs_hashcache.get(maxrev)
    if not key:
        revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
        if revs:
            s = hashutil.sha1()
            for rev in revs:
                s.update(b'%d;' % rev)
            key = s.digest()
            cl._filteredrevs_hashcache[maxrev] = key
    return key


def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    """yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs"""

    def errhandler(err):
        if err.filename == path:
            raise err

    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:

        def adddir(dirlst, dirname):
            dirstat = os.stat(dirname)
            match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
            if not match:
                dirlst.append(dirstat)
            return not match

    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if b'.hg' in dirs:
            yield root  # found a repository
            qroot = os.path.join(root, b'.hg', b'patches')
            if os.path.isdir(os.path.join(qroot, b'.hg')):
                yield qroot  # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove(b'.hg')
            else:
                dirs[:] = []  # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs


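For instance (path and usage illustrative), enumerating every repository below a share root, following symlinks and descending into working directories:

def example_walkrepos(ui):
    for root in walkrepos(b'/srv/hg', followsym=True, recurse=True):
        ui.write(b'%s\n' % root)
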
def binnode(ctx):
    """Return binary node id for a given basectx"""
    node = ctx.node()
    if node is None:
        return ctx.repo().nodeconstants.wdirid
    return node


def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    rev = ctx.rev()
    if rev is None:
        return wdirrev
    return rev


def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    repo = ctx.repo()
    return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))


def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    if ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    return b'%d:%s' % (rev, hexfunc(node))


def resolvehexnodeidprefix(repo, prefix):
    if prefix.startswith(b'x'):
        prefix = prefix[1:]
    try:
        # Uses unfiltered repo because it's faster when prefix is ambiguous.
        # This matches the shortesthexnodeidprefix() function below.
        node = repo.unfiltered().changelog._partialmatch(prefix)
    except error.AmbiguousPrefixLookupError:
        revset = repo.ui.config(
            b'experimental', b'revisions.disambiguatewithin'
        )
        if revset:
            # Clear config to avoid infinite recursion
            configoverrides = {
                (b'experimental', b'revisions.disambiguatewithin'): None
            }
            with repo.ui.configoverride(configoverrides):
                revs = repo.anyrevs([revset], user=True)
            matches = []
            for rev in revs:
                node = repo.changelog.node(rev)
                if hex(node).startswith(prefix):
                    matches.append(node)
            if len(matches) == 1:
                return matches[0]
        raise
    if node is None:
        return
    repo.changelog.rev(node)  # make sure node isn't filtered
    return node


def mayberevnum(repo, prefix):
    """Checks if the given prefix may be mistaken for a revision number"""
    try:
        i = int(prefix)
        # if we are a pure int, then starting with zero will not be
        # confused as a rev; or, obviously, if the int is larger
        # than the value of the tip rev. We still need to disambiguate if
        # prefix == '0', since that *is* a valid revnum.
        if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
            return False
        return True
    except ValueError:
        return False


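# Illustrative sketch of the heuristic above, for a repo with 100 revisions:
#
#   mayberevnum(repo, b'42')    # True:  plain int below the tip rev
#   mayberevnum(repo, b'0')     # True:  '0' is itself a valid revnum
#   mayberevnum(repo, b'042')   # False: a leading zero never denotes a rev
#   mayberevnum(repo, b'9999')  # False: larger than the tip rev
#   mayberevnum(repo, b'beef')  # False: not an integer at all
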
def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
    """Find the shortest unambiguous prefix that matches hexnode.

    If "cache" is not None, it must be a dictionary that can be used for
    caching between calls to this method.
    """
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow, so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.

    minlength = max(minlength, 1)

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
            if mayberevnum(repo, prefix):
                return b'x' + prefix
            else:
                return prefix

        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not mayberevnum(repo, prefix):
                return prefix

    cl = repo.unfiltered().changelog
    revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin')
    if revset:
        revs = None
        if cache is not None:
            revs = cache.get(b'disambiguationrevset')
        if revs is None:
            revs = repo.anyrevs([revset], user=True)
            if cache is not None:
                cache[b'disambiguationrevset'] = revs
        if cl.rev(node) in revs:
            hexnode = hex(node)
            nodetree = None
            if cache is not None:
                nodetree = cache.get(b'disambiguationnodetree')
            if not nodetree:
                if util.safehasattr(parsers, 'nodetree'):
                    # The CExt is the only implementation to provide a nodetree
                    # class so far.
                    index = cl.index
                    if util.safehasattr(index, 'get_cindex'):
                        # the rust wrapper needs to give access to its internal index
                        index = index.get_cindex()
                    nodetree = parsers.nodetree(index, len(revs))
                    for r in revs:
                        nodetree.insert(r)
                    if cache is not None:
                        cache[b'disambiguationnodetree'] = nodetree
            if nodetree is not None:
                length = max(nodetree.shortest(node), minlength)
                prefix = hexnode[:length]
                return disambiguate(prefix)
            for length in range(minlength, len(hexnode) + 1):
                matches = []
                prefix = hexnode[:length]
                for rev in revs:
                    otherhexnode = repo[rev].hex()
                    if prefix == otherhexnode[:length]:
                        matches.append(otherhexnode)
                if len(matches) == 1:
                    return disambiguate(prefix)

    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()


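# Note on the b'x' marker used by disambiguate() above: when
# experimental.revisions.prefixhexnode is enabled and the shortened hash
# could be mistaken for a revision number, the prefix is returned as e.g.
# b'x123abc'; resolvehexnodeidprefix() strips that leading b'x' again
# before resolving.
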
def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
    symbol is an ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
        return True
    except error.RepoLookupError:
        return False


def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = (
            b"symbol (%s of type %s) was not a string, did you mean "
            b"repo[symbol]?" % (symbol, type(symbol))
        )
        raise error.ProgrammingError(msg)
    try:
        if symbol in (b'.', b'tip', b'null'):
            return repo[symbol]

        try:
            r = int(symbol)
            if b'%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        if len(symbol) == 2 * repo.nodeconstants.nodelen:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        return repo[None]
    except (
        error.FilteredIndexError,
        error.FilteredLookupError,
        error.FilteredRepoLookupError,
    ):
        raise _filterederror(repo, symbol)


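# Illustrative usage sketch (symbols hypothetical):
#
#   ctx = revsymbol(repo, b'tip')        # symbolic name
#   ctx = revsymbol(repo, b'42')         # revision number
#   ctx = revsymbol(repo, b'9b2a99ad')   # unambiguous hash prefix
#   if isrevsymbol(repo, b'my-bookmark'):  # probe without raising
#       ctx = revsymbol(repo, b'my-bookmark')
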
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted into a function to help extensions (e.g. evolve)
    experiment with various message variants."""
    if repo.filtername.startswith(b'visible'):

        # Check if the changeset is obsolete
        unfilteredrepo = repo.unfiltered()
        ctx = revsymbol(unfilteredrepo, changeid)

        # If the changeset is obsolete, enrich the message with the reason
        # that made this changeset not visible
        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, changeid, ctx)
        else:
            msg = _(b"hidden revision '%s'") % changeid

        hint = _(b'use --hidden to access hidden revisions')

        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _(b"filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)


def revsingle(repo, revspec, default=b'.', localalias=None):
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec], localalias=localalias)
    if not l:
        raise error.Abort(_(b'empty revision set'))
    return repo[l.last()]


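# Illustrative sketch: revsingle() accepts a full revset and returns the
# context of the *last* matching revision, falling back to `default` when
# the spec is empty:
#
#   ctx = revsingle(repo, b'heads(default)')  # last member of the set
#   ctx = revsingle(repo, b'')                # -> repo[b'.']
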
def _pairspec(revspec):
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in (
        b'range',
        b'rangepre',
        b'rangepost',
        b'rangeall',
    )


def revpair(repo, revs):
    if not revs:
        return repo[b'.'], repo[None]

    l = revrange(repo, revs)

    if not l:
        raise error.Abort(_(b'empty revision range'))

    first = l.first()
    second = l.last()

    if (
        first == second
        and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)
    ):
        raise error.Abort(_(b'empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]


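# Illustrative sketch of the pair semantics above:
#
#   revpair(repo, [])         # -> (repo[b'.'], repo[None])
#   revpair(repo, [b'2::5'])  # -> (repo[2], repo[5]), a true range
#   revpair(repo, [b'3'])     # -> (repo[3], repo[None]): a single rev is
#                             #    paired with the working directory
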
def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``smartset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    allspecs = []
    for spec in specs:
        if isinstance(spec, int):
            spec = revsetlang.formatspec(b'%d', spec)
        allspecs.append(spec)
    return repo.anyrevs(allspecs, user=True, localalias=localalias)


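# Illustrative sketch (branchname is hypothetical): expand user arguments
# with revsetlang.formatspec() first, then union several specs at once:
#
#   spec = revsetlang.formatspec(b'branch(%s)', branchname)
#   for rev in revrange(repo, [spec, b'draft()']):
#       ...
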
def increasingwindows(windowsize=8, sizelimit=512):
    while True:
        yield windowsize
        if windowsize < sizelimit:
            windowsize *= 2


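# Illustrative sketch: the window sizes double up to sizelimit and then
# repeat forever:
#
#   import itertools
#   list(itertools.islice(increasingwindows(), 9))
#   # -> [8, 16, 32, 64, 128, 256, 512, 512, 512]
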
def walkchangerevs(repo, revs, makefilematcher, prepare):
    """Iterate over files and the revs in a "windowed" way.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order."""

    if not revs:
        return []
    change = repo.__getitem__

    def iterate():
        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            for i in pycompat.xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                nrevs.append(rev)
            for rev in sorted(nrevs):
                ctx = change(rev)
                prepare(ctx, makefilematcher(ctx))
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()


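# Illustrative usage sketch (the callbacks are hypothetical; real callers
# live in logcmdutil):
#
#   def prepare(ctx, fmatch):
#       pass  # gather per-window data here, in forward order
#
#   for ctx in walkchangerevs(repo, revs, lambda ctx: matchall(repo), prepare):
#       ui.write(b'%d\n' % ctx.rev())
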
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        return [parents[0], repo[nullrev]]
    if parents[0].rev() >= intrev(ctx) - 1:
        return []
    return parents


def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
    """Return a function that produces paths for presenting to the user.

    The returned function takes a repo-relative path and produces a path
    that can be presented in the UI.

    Depending on the value of ui.relative-paths, either a repo-relative or
    cwd-relative path will be produced.

    legacyrelativevalue is the value to use if ui.relative-paths=legacy

    If forcerelativevalue is not None, then that value will be used regardless
    of what ui.relative-paths is set to.
    """
    if forcerelativevalue is not None:
        relative = forcerelativevalue
    else:
        config = repo.ui.config(b'ui', b'relative-paths')
        if config == b'legacy':
            relative = legacyrelativevalue
        else:
            relative = stringutil.parsebool(config)
            if relative is None:
                raise error.ConfigError(
                    _(b"ui.relative-paths is not a boolean ('%s')") % config
                )

    if relative:
        cwd = repo.getcwd()
        if cwd != b'':
            # this branch would work even if cwd == b'' (ie cwd = repo
            # root), but its generality makes the returned function slower
            pathto = repo.pathto
            return lambda f: pathto(f, cwd)
    if repo.ui.configbool(b'ui', b'slash'):
        return lambda f: f
    else:
        return util.localpath


def subdiruipathfn(subpath, uipathfn):
    '''Create a new uipathfn that treats the file as relative to subpath.'''
    return lambda f: uipathfn(posixpath.join(subpath, f))


def anypats(pats, opts):
    """Checks if any patterns, including --include and --exclude, were given.

    Some commands (e.g. addremove) use this condition for deciding whether to
    print absolute or relative paths.
    """
    return bool(pats or opts.get(b'include') or opts.get(b'exclude'))


880
880
881 def expandpats(pats):
881 def expandpats(pats):
882 """Expand bare globs when running on windows.
882 """Expand bare globs when running on windows.
883 On posix we assume it already has already been done by sh."""
883 On posix we assume it already has already been done by sh."""
884 if not util.expandglobs:
884 if not util.expandglobs:
885 return list(pats)
885 return list(pats)
886 ret = []
886 ret = []
887 for kindpat in pats:
887 for kindpat in pats:
888 kind, pat = matchmod._patsplit(kindpat, None)
888 kind, pat = matchmod._patsplit(kindpat, None)
889 if kind is None:
889 if kind is None:
890 try:
890 try:
891 globbed = glob.glob(pat)
891 globbed = glob.glob(pat)
892 except re.error:
892 except re.error:
893 globbed = [pat]
893 globbed = [pat]
894 if globbed:
894 if globbed:
895 ret.extend(globbed)
895 ret.extend(globbed)
896 continue
896 continue
897 ret.append(kindpat)
897 ret.append(kindpat)
898 return ret
898 return ret
899
899
900
900
901 def matchandpats(
901 def matchandpats(
902 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
902 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
903 ):
903 ):
904 """Return a matcher and the patterns that were used.
904 """Return a matcher and the patterns that were used.
905 The matcher will warn about bad matches, unless an alternate badfn callback
905 The matcher will warn about bad matches, unless an alternate badfn callback
906 is provided."""
906 is provided."""
907 if opts is None:
907 if opts is None:
908 opts = {}
908 opts = {}
909 if not globbed and default == b'relpath':
909 if not globbed and default == b'relpath':
910 pats = expandpats(pats or [])
910 pats = expandpats(pats or [])
911
911
912 uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)
912 uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)
913
913
914 def bad(f, msg):
914 def bad(f, msg):
915 ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))
915 ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))
916
916
917 if badfn is None:
917 if badfn is None:
918 badfn = bad
918 badfn = bad
919
919
920 m = ctx.match(
920 m = ctx.match(
921 pats,
921 pats,
922 opts.get(b'include'),
922 opts.get(b'include'),
923 opts.get(b'exclude'),
923 opts.get(b'exclude'),
924 default,
924 default,
925 listsubrepos=opts.get(b'subrepos'),
925 listsubrepos=opts.get(b'subrepos'),
926 badfn=badfn,
926 badfn=badfn,
927 )
927 )
928
928
929 if m.always():
929 if m.always():
930 pats = []
930 pats = []
931 return m, pats
931 return m, pats
932
932
933
933
934 def match(
934 def match(
935 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
935 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
936 ):
936 ):
937 '''Return a matcher that will warn about bad matches.'''
937 '''Return a matcher that will warn about bad matches.'''
938 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
938 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
939
939
940
940
941 def matchall(repo):
941 def matchall(repo):
942 '''Return a matcher that will efficiently match everything.'''
942 '''Return a matcher that will efficiently match everything.'''
943 return matchmod.always()
943 return matchmod.always()
944
944
945
945
946 def matchfiles(repo, files, badfn=None):
946 def matchfiles(repo, files, badfn=None):
947 '''Return a matcher that will efficiently match exactly these files.'''
947 '''Return a matcher that will efficiently match exactly these files.'''
948 return matchmod.exact(files, badfn=badfn)
948 return matchmod.exact(files, badfn=badfn)
949
949
950
950
951 def parsefollowlinespattern(repo, rev, pat, msg):
951 def parsefollowlinespattern(repo, rev, pat, msg):
952 """Return a file name from `pat` pattern suitable for usage in followlines
952 """Return a file name from `pat` pattern suitable for usage in followlines
953 logic.
953 logic.
954 """
954 """
955 if not matchmod.patkind(pat):
955 if not matchmod.patkind(pat):
956 return pathutil.canonpath(repo.root, repo.getcwd(), pat)
956 return pathutil.canonpath(repo.root, repo.getcwd(), pat)
957 else:
957 else:
958 ctx = repo[rev]
958 ctx = repo[rev]
959 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
959 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
960 files = [f for f in ctx if m(f)]
960 files = [f for f in ctx if m(f)]
961 if len(files) != 1:
961 if len(files) != 1:
962 raise error.ParseError(msg)
962 raise error.ParseError(msg)
963 return files[0]
963 return files[0]
964
964
965
965
def getorigvfs(ui, repo):
    """return a vfs suitable to save 'orig' files

    return None if no special directory is configured"""
    origbackuppath = ui.config(b'ui', b'origbackuppath')
    if not origbackuppath:
        return None
    return vfs.vfs(repo.wvfs.join(origbackuppath))


def backuppath(ui, repo, filepath):
    """customize where working copy backup files (.orig files) are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified

    filepath is repo-relative

    Returns an absolute path
    """
    origvfs = getorigvfs(ui, repo)
    if origvfs is None:
        return repo.wjoin(filepath + b".orig")

    origbackupdir = origvfs.dirname(filepath)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(pathutil.finddirs(filepath))):
            if origvfs.isfileorlink(f):
                ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    if origvfs.isdir(filepath) and not origvfs.islink(filepath):
        ui.note(
            _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
        )
        origvfs.rmtree(filepath, forcibly=True)

    return origvfs.join(filepath)


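# Illustrative sketch: with the following configuration, backuppath()
# returns a location under .hg/origbackups instead of appending .orig
# next to the file in the working copy:
#
#   [ui]
#   origbackuppath = .hg/origbackups
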
class _containsnode(object):
    """proxy __contains__(node) to container.__contains__ which accepts revs"""

    def __init__(self, repo, revcontainer):
        self._torev = repo.changelog.rev
        self._revcontains = revcontainer.__contains__

    def __contains__(self, node):
        return self._revcontains(self._torev(node))


def cleanupnodes(
    repo,
    replacements,
    operation,
    moves=None,
    metadata=None,
    fixphase=False,
    targetphase=None,
    backup=True,
):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is a dictionary containing metadata to be stored in obsmarkers if
    obsolescence is enabled.
    """
    assert fixphase or targetphase is None
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, b'items'):
        replacements = {(n,): () for n in replacements}
    else:
        # upgrading non tuple "source" to tuple ones for BC
        repls = {}
        for key, value in replacements.items():
            if not isinstance(key, tuple):
                key = (key,)
            repls[key] = value
        replacements = repls

    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    for oldnodes, newnodes in replacements.items():
        for oldnode in oldnodes:
            if oldnode in moves:
                continue
            if len(newnodes) > 1:
                # usually a split, take the one with biggest rev number
                newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
            elif len(newnodes) == 0:
                # move bookmark backwards
                allreplaced = []
                for rep in replacements:
                    allreplaced.extend(rep)
                roots = list(
                    unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
                )
                if roots:
                    newnode = roots[0].node()
                else:
                    newnode = repo.nullid
            else:
                newnode = newnodes[0]
            moves[oldnode] = newnode

    allnewnodes = [n for ns in replacements.values() for n in ns]
    toretract = {}
    toadvance = {}
    if fixphase:
        precursors = {}
        for oldnodes, newnodes in replacements.items():
            for oldnode in oldnodes:
                for newnode in newnodes:
                    precursors.setdefault(newnode, []).append(oldnode)

        allnewnodes.sort(key=lambda n: unfi[n].rev())
        newphases = {}

        def phase(ctx):
            return newphases.get(ctx.node(), ctx.phase())

        for newnode in allnewnodes:
            ctx = unfi[newnode]
            parentphase = max(phase(p) for p in ctx.parents())
            if targetphase is None:
                oldphase = max(
                    unfi[oldnode].phase() for oldnode in precursors[newnode]
                )
                newphase = max(oldphase, parentphase)
            else:
                newphase = max(targetphase, parentphase)
            newphases[newnode] = newphase
            if newphase > ctx.phase():
                toretract.setdefault(newphase, []).append(newnode)
            elif newphase < ctx.phase():
                toadvance.setdefault(newphase, []).append(newnode)

    with repo.transaction(b'cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks  # avoid import cycle

            repo.ui.debug(
                b'moving bookmarks %r from %s to %s\n'
                % (
                    pycompat.rapply(pycompat.maybebytestr, oldbmarks),
                    hex(oldnode),
                    hex(newnode),
                )
            )
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs(
                b'parents(roots(%ln & (::%n))) - parents(%n)',
                allnewnodes,
                newnode,
                oldnode,
            )
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        for phase, nodes in toretract.items():
            phases.retractboundary(repo, tr, phase, nodes)
        for phase, nodes in toadvance.items():
            phases.advanceboundary(repo, tr, phase, nodes)

        mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obsolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the nodes in topological order; that might be useful
            # for some obsstore logic.
            # NOTE: the sorting might belong to createmarkers.
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0][0])
            rels = []
            for ns, s in sorted(replacements.items(), key=sortfunc):
                rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
                rels.append(rel)
            if rels:
                obsolete.createmarkers(
                    repo, rels, operation=operation, metadata=metadata
                )
        elif phases.supportinternal(repo) and mayusearchived:
            # this assumes we do not have "unstable" nodes above the cleaned ones
            allreplaced = set()
            for ns in replacements.keys():
                allreplaced.update(ns)
            if backup:
                from . import repair  # avoid import cycle

                node = min(allreplaced, key=repo.changelog.rev)
                repair.backupbundle(
                    repo, allreplaced, allreplaced, node, operation
                )
            phases.retractboundary(repo, tr, phases.archived, allreplaced)
        else:
            from . import repair  # avoid import cycle

            tostrip = list(n for ns in replacements for n in ns)
            if tostrip:
                repair.delayedstrip(
                    repo.ui, repo, tostrip, operation, backup=backup
                )


def addremove(repo, matcher, prefix, uipathfn, opts=None):
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get(b'dry_run')
    try:
        similarity = float(opts.get(b'similarity') or 0)
    except ValueError:
        raise error.Abort(_(b'similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_(b'similarity must be between 0 and 100'))
    similarity /= 100.0

    ret = 0

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = subdiruipathfn(subpath, uipathfn)
            try:
                if sub.addremove(submatch, subprefix, subuipathfn, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )

    rejected = []

    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(
        repo, badmatch
    )

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _(b'adding %s\n') % uipathfn(abs)
                label = b'ui.addremove.added'
            else:
                status = _(b'removing %s\n') % uipathfn(abs)
                label = b'ui.addremove.removed'
            repo.ui.status(status, label=label)

    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret


def marktouched(repo, files, similarity=0.0):
    """Assert that files have somehow been operated upon. Files are relative
    to the repo root."""
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _(b'adding %s\n') % abs
            else:
                status = _(b'removing %s\n') % abs
            repo.ui.status(status)

    # TODO: We should probably have the caller pass in uipathfn and apply it to
    # the messages above too. legacyrelativevalue=True is consistent with how
    # it used to work.
    uipathfn = getuipathfn(repo, legacyrelativevalue=True)
    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0


def _interestingfiles(repo, matcher):
    """Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean."""
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    matcher = repo.narrowmatch(matcher, includeexact=True)
    walkresults = dirstate.walk(
        matcher,
        subrepos=sorted(ctx.substate),
        unknown=True,
        ignored=False,
        full=False,
    )
    for abs, st in pycompat.iteritems(walkresults):
        dstate = dirstate[abs]
        if dstate == b'?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != b'r' and not st:
            deleted.append(abs)
        elif dstate == b'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == b'r' and not st:
            removed.append(abs)
        elif dstate == b'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten


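# Sketch of the dirstate state codes consulted in _interestingfiles():
# b'?' untracked, b'a' added, b'r' removed; `st` is the walk's stat-like
# result, so a falsy `st` means the file is absent from disk.
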
def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(
            repo, added, removed, similarity
        ):
            if (
                repo.ui.verbose
                or not matcher.exact(old)
                or not matcher.exact(new)
            ):
                repo.ui.status(
                    _(
                        b'recording removal of %s as rename to %s '
                        b'(%d%% similar)\n'
                    )
                    % (uipathfn(old), uipathfn(new), score * 100)
                )
            renames[new] = old
    return renames


def _markchanges(repo, unknown, deleted, renames):
    """Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied."""
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in pycompat.iteritems(renames):
            wctx.copy(old, new)


def getrenamedfn(repo, endrev=None):
    if copiesmod.usechangesetcentricalgo(repo):

        def getrenamed(fn, rev):
            ctx = repo[rev]
            p1copies = ctx.p1copies()
            if fn in p1copies:
                return p1copies[fn]
            p2copies = ctx.p2copies()
            if fn in p2copies:
                return p2copies[fn]
            return None

        return getrenamed

    rcache = {}
    if endrev is None:
        endrev = len(repo)

    def getrenamed(fn, rev):
        """looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev."""
        if fn not in rcache:
            rcache[fn] = {}
            fl = repo.file(fn)
            for i in fl:
                lr = fl.linkrev(i)
                renamed = fl.renamed(fl.node(i))
                rcache[fn][lr] = renamed and renamed[0]
                if lr >= endrev:
                    break
        if rev in rcache[fn]:
            return rcache[fn][rev]

        # If linkrev != rev (i.e. rev not found in rcache) fallback to
        # filectx logic.
        try:
            return repo[rev][fn].copysource()
        except error.LookupError:
            return None

    return getrenamed

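# Illustrative sketch: the shape of the per-file rename cache built by the
# filelog-based getrenamed above. Keys are linkrevs; values are either the
# copy-source path or False when that revision of the file was not a rename.
# Toy data, hypothetical paths:
rcache = {
    b'src/main.py': {
        12: False,  # revision 12 touched the file, no rename recorded
        30: b'src/old_main.py',  # revision 30 recorded a copy source
    },
}
assert rcache[b'src/main.py'].get(30) == b'src/old_main.py'
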
def getcopiesfn(repo, endrev=None):
    if copiesmod.usechangesetcentricalgo(repo):

        def copiesfn(ctx):
            if ctx.p2copies():
                allcopies = ctx.p1copies().copy()
                # There should be no overlap
                allcopies.update(ctx.p2copies())
                return sorted(allcopies.items())
            else:
                return sorted(ctx.p1copies().items())

    else:
        getrenamed = getrenamedfn(repo, endrev)

        def copiesfn(ctx):
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename))
            return copies

    return copiesfn

def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc:  # copying back a copy?
        if repo.dirstate[dst] not in b'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == b'a' and origsrc == src:
            if not ui.quiet:
                ui.warn(
                    _(
                        b"%s has not been committed yet, so no copy "
                        b"data will be stored for %s.\n"
                    )
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
                )
            if repo.dirstate[dst] in b'?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)

def movedirstate(repo, newctx, match=None):
    """Move the dirstate to newctx and adjust it as necessary.

    A matcher can be provided as an optimization. It is probably a bug to pass
    a matcher that doesn't match all the differences between the parent of the
    working copy and newctx.
    """
    oldctx = repo[b'.']
    ds = repo.dirstate
    copies = dict(ds.copies())
    ds.setparents(newctx.node(), repo.nullid)
    s = newctx.status(oldctx, match=match)
+
    for f in s.modified:
-       if ds[f] == b'r':
-           # modified + removed -> removed
-           continue
-       ds.normallookup(f)
+       ds.update_file_reference(f, p1_tracked=True)

    for f in s.added:
-       if ds[f] == b'r':
-           # added + removed -> unknown
-           ds.drop(f)
-       elif ds[f] != b'a':
-           ds.add(f)
+       ds.update_file_reference(f, p1_tracked=False)

    for f in s.removed:
-       if ds[f] == b'a':
-           # removed + added -> normal
-           ds.normallookup(f)
-       elif ds[f] != b'r':
-           ds.remove(f)
+       ds.update_file_reference(f, p1_tracked=True)

    # Merge old parent and old working dir copies
    oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
    oldcopies.update(copies)
    copies = {
        dst: oldcopies.get(src, src)
        for dst, src in pycompat.iteritems(oldcopies)
    }
    # Adjust the dirstate copies
    for dst, src in pycompat.iteritems(copies):
        if src not in newctx or dst in newctx or ds[dst] != b'a':
            src = None
        ds.copy(src, dst)
    repo._quick_access_changeid_invalidate()

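# Illustrative sketch for the hunk above. As this changeset uses it,
# ds.update_file_reference(f, p1_tracked=...) reads as replacing the
# hand-rolled state juggling: the dirstate itself reconciles "is this file
# tracked in the new first parent?" with the current working-copy state,
# instead of movedirstate pattern-matching on the b'r'/b'a' codes.
#
# The copy-remapping comprehension after the hunk chains copies through the
# old parent. Pure-Python toy data, hypothetical paths:
copies = {b'c': b'b'}  # working copy says c was copied from b
oldcopies = {b'b': b'a'}  # old parent says b was copied from a
oldcopies.update(copies)  # -> {b'b': b'a', b'c': b'b'}
copies = {dst: oldcopies.get(src, src) for dst, src in oldcopies.items()}
assert copies[b'c'] == b'a'  # c now traces back to a, as known to newctx
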
def filterrequirements(requirements):
    """filters the requirements into two sets:

    wcreq: requirements which should be written in .hg/requires
    storereq: which should be written in .hg/store/requires

    Returns (wcreq, storereq)
    """
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
        wc, store = set(), set()
        for r in requirements:
            if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
                wc.add(r)
            else:
                store.add(r)
        return wc, store
    return requirements, None

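# Illustrative sketch with hypothetical requirement names: when the share-safe
# requirement is present, working-directory requirements stay in .hg/requires
# and everything else goes to .hg/store/requires; otherwise no split happens.
# WORKING_DIR_REQS below is a stand-in for the real constant, not its actual
# contents:
WORKING_DIR_REQS = {b'shared', b'dirstate-v2'}
reqs = {b'share-safe', b'dirstate-v2', b'revlogv1', b'store'}
wc = {r for r in reqs if r in WORKING_DIR_REQS}
store = reqs - wc
assert b'revlogv1' in store and b'dirstate-v2' in wc
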
def istreemanifest(repo):
    """returns whether the repository is using treemanifest or not"""
    return requirementsmod.TREEMANIFEST_REQUIREMENT in repo.requirements


def writereporequirements(repo, requirements=None):
    """writes requirements for the repo

    Requirements are written to .hg/requires and .hg/store/requires based
    on whether share-safe mode is enabled and which requirements are wdir
    requirements and which are store requirements
    """
    if requirements:
        repo.requirements = requirements
    wcreq, storereq = filterrequirements(repo.requirements)
    if wcreq is not None:
        writerequires(repo.vfs, wcreq)
    if storereq is not None:
        writerequires(repo.svfs, storereq)
    elif repo.ui.configbool(b'format', b'usestore'):
        # only remove store requires if we are using store
        repo.svfs.tryunlink(b'requires')


def writerequires(opener, requirements):
    with opener(b'requires', b'w', atomictemp=True) as fp:
        for r in sorted(requirements):
            fp.write(b"%s\n" % r)

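# Illustrative sketch: the on-disk format writerequires produces is simply the
# sorted requirement names, one per line, written atomically (atomictemp) so
# readers never observe a half-written file. For a hypothetical set:
content = b''.join(b'%s\n' % r for r in sorted({b'store', b'fncache'}))
assert content == b'fncache\nstore\n'
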
class filecachesubentry(object):
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

            # check again
            if not self._cacheable:
                return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise


class filecacheentry(object):
    def __init__(self, paths, stat=True):
        self._entries = []
        for path in paths:
            self._entries.append(filecachesubentry(path, stat))

    def changed(self):
        '''true if any entry has changed'''
        for entry in self._entries:
            if entry.changed():
                return True
        return False

    def refresh(self):
        for entry in self._entries:
            entry.refresh()


class filecache(object):
    """A property-like decorator that tracks files under .hg/ for updates.

    On first access, the files defined as arguments are stat()ed and the
    results cached. The decorated function is called. The results are stashed
    away in a ``_filecache`` dict on the object whose method is decorated.

    On subsequent access, the cached result is used as it is set to the
    instance dictionary.

    On external property set/delete operations, the caller must update the
    corresponding _filecache entry appropriately. Use __class__.<attr>.set()
    instead of directly setting <attr>.

    When using the property API, the cached data is always used if available.
    No stat() is performed to check if the file has changed.

    Others can muck about with the state of the ``_filecache`` dict. e.g. they
    can populate an entry before the property's getter is called. In this case,
    entries in ``_filecache`` will be used during property operations,
    if available. If the underlying file changes, it is up to external callers
    to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
    method result as well as possibly calling ``del obj._filecache[attr]`` to
    remove the ``filecacheentry``.
    """

    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class whose member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        self.func = func
        self.sname = func.__name__
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self

        assert self.sname not in obj.__dict__

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    # don't implement __set__(), which would make __dict__ lookup as slow as
    # function call.

    def set(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value  # update cached copy
        obj.__dict__[self.sname] = value  # update copy returned by obj.x

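# Illustrative sketch of the contract spelled out in the filecache docstring:
# when replacing a cached property from outside, go through
# __class__.<attr>.set() so the _filecache entry stays in sync with
# obj.__dict__. The attribute name below is a hypothetical example:
#
#   repo.__class__.dirstate.set(repo, newdirstate)  # updates both places
#   repo.dirstate = newdirstate   # bypasses _filecache (__set__ not defined)
#
# and, after an on-disk change, to force a re-read on next access:
#
#   delattr(repo, 'dirstate')         # drop the memoized value
#   del repo._filecache[b'dirstate']  # optionally drop the stat entry too
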
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config(b"extdata", source)
    if not spec:
        raise error.Abort(_(b"unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith(b"shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(
                procutil.tonativestr(cmd),
                shell=True,
                bufsize=-1,
                close_fds=procutil.closefds,
                stdout=subprocess.PIPE,
                cwd=procutil.tonativestr(repo.root),
            )
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            if b" " in l:
                k, v = l.strip().split(b" ", 1)
            else:
                k, v = l.strip(), b""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError, error.InputError):
                pass  # we ignore data for nodes that don't exist locally
    finally:
        if proc:
            try:
                proc.communicate()
            except ValueError:
                # This happens if we started iterating src and then
                # get a parse error on a line. It should be safe to ignore.
                pass
        if src:
            src.close()
    if proc and proc.returncode != 0:
        raise error.Abort(
            _(b"extdata command '%s' failed: %s")
            % (cmd, procutil.explainexit(proc.returncode))
        )

    return data

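# Illustrative sketch: a hypothetical [extdata] source and the record format
# extdatasource parses. In an hgrc:
#
#   [extdata]
#   bugzilla = shell:cat .hg/bugzilla-data
#
# where the command emits "<revision-specifier> [value]" lines; unknown
# revisions are skipped. The per-line parsing matches the code above:
line = b'3de24a7f3758 fixed-in-5.9\n'
k, v = line.strip().split(b' ', 1)
assert (k, v) == (b'3de24a7f3758', b'fixed-in-5.9')
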
class progress(object):
    def __init__(self, ui, updatebar, topic, unit=b"", total=None):
        self.ui = ui
        self.pos = 0
        self.topic = topic
        self.unit = unit
        self.total = total
        self.debug = ui.configbool(b'progress', b'debug')
        self._updatebar = updatebar

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.complete()

    def update(self, pos, item=b"", total=None):
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._updatebar(self.topic, self.pos, item, self.unit, self.total)
        if self.debug:
            self._printdebug(item)

    def increment(self, step=1, item=b"", total=None):
        self.update(self.pos + step, item, total)

    def complete(self):
        self.pos = None
        self.unit = b""
        self.total = None
        self._updatebar(self.topic, self.pos, b"", self.unit, self.total)

    def _printdebug(self, item):
        unit = b''
        if self.unit:
            unit = b' ' + self.unit
        if item:
            item = b' ' + item

        if self.total:
            pct = 100.0 * self.pos / self.total
            self.ui.debug(
                b'%s:%s %d/%d%s (%4.2f%%)\n'
                % (self.topic, item, self.pos, self.total, unit, pct)
            )
        else:
            self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit))

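# Illustrative sketch: driving the progress helper above with stand-in
# objects. Everything here is hypothetical except the API shown above
# (updatebar receives (topic, pos, item, unit, total); complete() sends
# pos=None on exit from the context manager).
events = []

def fake_updatebar(topic, pos, item, unit, total):
    events.append((topic, pos, item, unit, total))

class _FakeUI(object):
    def configbool(self, section, name):
        return False  # no debug output

    def debug(self, msg):
        pass

with progress(_FakeUI(), fake_updatebar, b'files', unit=b'files', total=3) as p:
    p.increment(item=b'a.txt')
    p.increment(item=b'b.txt')

assert events[-1] == (b'files', None, b'', b'', None)  # complete() fired
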
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta"""
    # experimental config: format.generaldelta
    return ui.configbool(b'format', b'generaldelta') or ui.configbool(
        b'format', b'usegeneraldelta'
    )


def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised"""
    # experimental config: format.generaldelta
    return ui.configbool(b'format', b'generaldelta')

class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""

    firstlinekey = b'__firstline'

    def __init__(self, vfs, path, keys=None):
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of file should
        be treated as a key-value pair or returned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _(b"empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # the 'if line.strip()' part prevents us from failing on empty
            # lines which only contain '\n' therefore are not skipped
            # by 'if line'
            updatedict = dict(
                line[:-1].split(b'=', 1) for line in lines if line.strip()
            )
            if self.firstlinekey in updatedict:
                e = _(b"%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(updatedict)
        except ValueError as e:
            raise error.CorruptedState(stringutil.forcebytestr(e))
        return d

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to file before
        everything else, as it is, not in a key=value form"""
        lines = []
        if firstline is not None:
            lines.append(b'%s\n' % firstline)

        for k, v in data.items():
            if k == self.firstlinekey:
                e = b"key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            if not k[0:1].isalpha():
                e = b"keys must start with a letter in a key-value file"
                raise error.ProgrammingError(e)
            if not k.isalnum():
                e = b"invalid key name in a simple key-value file"
                raise error.ProgrammingError(e)
            if b'\n' in v:
                e = b"invalid value in a simple key-value file"
                raise error.ProgrammingError(e)
            lines.append(b"%s=%s\n" % (k, v))
        with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
            fp.write(b''.join(lines))

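# Illustrative sketch: the on-disk shape simplekeyvaluefile.write() produces
# for hypothetical data, with a firstline:
#
#   shelvedstate-v2          <- firstline, stored as-is
#   branch=stable
#   name=default
#
# and read(firstlinenonkeyval=True) would yield the firstline under the
# reserved b'__firstline' key. The per-line parsing mirrors read():
lines = [b'name=default\n', b'branch=stable\n']
d = dict(line[:-1].split(b'=', 1) for line in lines if line.strip())
assert d == {b'name': b'default', b'branch': b'stable'}
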
_reportobsoletedsource = [
    b'debugobsolete',
    b'pull',
    b'push',
    b'serve',
    b'unbundle',
]

_reportnewcssource = [
    b'pull',
    b'unbundle',
]

def prefetchfiles(repo, revmatches):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them.

    Args:
      revmatches: a list of (revision, match) tuples to indicate the files to
        fetch at each revision. If any of the match elements is None, it
        matches all files.
    """

    def _matcher(m):
        if m:
            assert isinstance(m, matchmod.basematcher)
            # The command itself will complain about files that don't exist, so
            # don't duplicate the message.
            return matchmod.badmatch(m, lambda fn, msg: None)
        else:
            return matchall(repo)

    revbadmatches = [(rev, _matcher(match)) for (rev, match) in revmatches]

    fileprefetchhooks(repo, revbadmatches)


# a list of (repo, revs, match) prefetch functions
fileprefetchhooks = util.hooks()

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True

def registersummarycallback(repo, otr, txnname=b'', as_validator=False):
    """register a callback to issue a summary after the transaction is closed

    If as_validator is true, then the callbacks are registered as transaction
    validators instead
    """

    def txmatch(sources):
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used leading to troubles. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())

        def wrapped(tr):
            repo = reporef()
            if filtername:
                assert repo is not None  # help pytype
                repo = repo.filtered(filtername)
            func(repo, tr)

        newcat = b'%02i-txnreport' % len(categories)
        if as_validator:
            otr.addvalidator(newcat, wrapped)
        else:
            otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    @reportsummary
    def reportchangegroup(repo, tr):
        cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
        cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
        cgfiles = tr.changes.get(b'changegroup-count-files', 0)
        cgheads = tr.changes.get(b'changegroup-count-heads', 0)
        if cgchangesets or cgrevisions or cgfiles:
            htext = b""
            if cgheads:
                htext = _(b" (%+d heads)") % cgheads
            msg = _(b"added %d changesets with %d changes to %d files%s\n")
            if as_validator:
                msg = _(b"adding %d changesets with %d changes to %d files%s\n")
            assert repo is not None  # help pytype
            repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))

    if txmatch(_reportobsoletedsource):

        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            newmarkers = len(tr.changes.get(b'obsmarkers', ()))
            if newmarkers:
                repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
            if obsoleted:
                msg = _(b'obsoleted %i changesets\n')
                if as_validator:
                    msg = _(b'obsoleting %i changesets\n')
                repo.ui.status(msg % len(obsoleted))

        if obsolete.isenabled(
            repo, obsolete.createmarkersopt
        ) and repo.ui.configbool(
            b'experimental', b'evolution.report-instabilities'
        ):
            instabilitytypes = [
                (b'orphan', b'orphan'),
                (b'phase-divergent', b'phasedivergent'),
                (b'content-divergent', b'contentdivergent'),
            ]

            def getinstabilitycounts(repo):
                filtered = repo.changelog.filteredrevs
                counts = {}
                for instability, revset in instabilitytypes:
                    counts[instability] = len(
                        set(obsolete.getrevs(repo, revset)) - filtered
                    )
                return counts

            oldinstabilitycounts = getinstabilitycounts(repo)

            @reportsummary
            def reportnewinstabilities(repo, tr):
                newinstabilitycounts = getinstabilitycounts(repo)
                for instability, revset in instabilitytypes:
                    delta = (
                        newinstabilitycounts[instability]
                        - oldinstabilitycounts[instability]
                    )
                    msg = getinstabilitymessage(delta, instability)
                    if msg:
                        repo.ui.warn(msg)

    if txmatch(_reportnewcssource):

        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                return

            # Compute the bounds of new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]

                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = b'%s:%s' % (minrev, maxrev)
                draft = len(repo.revs(b'%ld and draft()', revs))
                secret = len(repo.revs(b'%ld and secret()', revs))
                if not (draft or secret):
                    msg = _(b'new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _(b'new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _(b'new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = b'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)

            # search new changesets directly pulled as obsolete
            duplicates = tr.changes.get(b'revduplicates', ())
            obsadded = unfi.revs(
                b'(%d: + %ld) and obsolete()', origrepolen, duplicates
            )
            cl = repo.changelog
            extinctadded = [r for r in obsadded if r not in cl]
            if extinctadded:
                # They are not just obsolete, but obsolete and invisible
                # we call them "extinct" internally but the terms have not been
                # exposed to users.
                msg = b'(%d other changesets obsolete on arrival)\n'
                repo.ui.status(msg % len(extinctadded))

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            published = []
            for revs, (old, new) in tr.changes.get(b'phases', []):
                if new != phases.public:
                    continue
                published.extend(rev for rev in revs if rev < origrepolen)
            if not published:
                return
            msg = _(b'%d local changesets published\n')
            if as_validator:
                msg = _(b'%d local changesets will be published\n')
            repo.ui.status(msg % len(published))

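# Illustrative sketch of the weakref pattern used by reportsummary above: the
# callback keeps only a weak reference to the unfiltered repo plus the filter
# name, and rebuilds the filtered view when it finally runs, so the callback
# never keeps the repo alive by itself. Stdlib-only toy (class name and data
# are hypothetical; in CPython the collection below happens immediately):
import weakref

class Repo(object):
    pass

repo = Repo()
reporef = weakref.ref(repo)

def callback():
    r = reporef()  # None if the repo has been garbage-collected
    if r is not None:
        pass  # re-apply filtering here, then do the real work

callback()
del repo
assert reporef() is None
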
def getinstabilitymessage(delta, instability):
    """function to return the message to show warning about new instabilities

    exists as a separate function so that extensions can wrap it to show more
    information, like how to fix instabilities"""
    if delta > 0:
        return _(b'%i new %s changesets\n') % (delta, instability)


def nodesummaries(repo, nodes, maxnumnodes=4):
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return b' '.join(short(h) for h in nodes)
    first = b' '.join(short(h) for h in nodes[:maxnumnodes])
    return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)


def enforcesinglehead(repo, tr, desc, accountclosed, filtername):
    """check that no named branch has multiple heads"""
    if desc in (b'strip', b'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered(filtername)
    # possible improvement: we could restrict the check to affected branch
    bm = visible.branchmap()
    for name in bm:
        heads = bm.branchheads(name, closed=accountclosed)
        if len(heads) > 1:
            msg = _(b'rejecting multiple heads on branch "%s"')
            msg %= name
            hint = _(b'%d heads: %s')
            hint %= (len(heads), nodesummaries(repo, heads))
            raise error.Abort(msg, hint=hint)


def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    return sink

def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    if not repo.filtername or not repo.ui.configbool(
        b'experimental', b'directaccess'
    ):
        return repo

    if repo.filtername not in (b'visible', b'visible-hidden'):
        return repo

    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError:  # will be reported by scmutil.revrange()
            continue

        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)

    if not revs:
        return repo

    if hiddentype == b'warn':
        unfi = repo.unfiltered()
        revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(
            _(
                b"warning: accessing hidden changesets for write "
                b"operation: %s\n"
            )
            % revstr
        )

    # we have to use new filtername to separate branch/tags cache until we can
    # disable these caches when revisions are dynamically pinned.
    return repo.filtered(b'visible-hidden', revs)

def _getrevsfromsymbols(repo, symbols):
    """parses the list of symbols and returns a set of revision numbers of
    hidden changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
    for s in symbols:
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    if n not in cl:
                        revs.add(n)
                    continue
        except ValueError:
            pass

        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs

def bookmarkrevs(repo, mark):
    """Select revisions reachable by a given bookmark

    If the bookmarked revision isn't a head, an empty set will be returned.
    """
    return repo.revs(format_bookmark_revspec(mark))


def format_bookmark_revspec(mark):
    """Build a revset expression to select revisions reachable by a given
    bookmark"""
    mark = b'literal:' + mark
    return revsetlang.formatspec(
        b"ancestors(bookmark(%s)) - "
        b"ancestors(head() and not bookmark(%s)) - "
        b"ancestors(bookmark() and not bookmark(%s))",
        mark,
        mark,
        mark,
    )
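
# Illustrative sketch: for a hypothetical bookmark named "feature", the
# expression built above is (roughly, after formatspec quoting):
#
#   ancestors(bookmark('literal:feature'))
#   - ancestors(head() and not bookmark('literal:feature'))
#   - ancestors(bookmark() and not bookmark('literal:feature'))
#
# i.e. everything reachable from the bookmark, minus history reachable from
# other heads or other bookmarks, which is why a bookmark that is not a head
# selects the empty set.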