dirstate-v2: Add support when Rust is not enabled...
Simon Sapin
r49037:b4f83c9e default
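
The two lines this changeset deletes from dirstate.py were the module-level gate that tied dirstate-v2 support to the Rust extensions. A minimal, self-contained sketch of that removed pattern (the import path is an assumption based on what policy.importrust does, not code from this diff):

import importlib

def importrust(name):
    # stand-in for mercurial.policy.importrust(): returns the Rust
    # extension module, or None in a pure-Python build
    try:
        return importlib.import_module('mercurial.rustext.' + name)
    except ImportError:
        return None

rustmod = importrust('dirstate')
SUPPORTS_DIRSTATE_V2 = rustmod is not None  # the gate removed below

After this change no such flag exists in dirstate.py: callers pass use_dirstate_v2 to the dirstate constructor, and the pure-Python dirstatemap is expected to handle the v2 format itself.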
@@ -1,1513 +1,1511 @@
 # dirstate.py - working directory tracking for mercurial
 #
 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import collections
 import contextlib
 import errno
 import os
 import stat

 from .i18n import _
 from .pycompat import delattr

 from hgdemandimport import tracing

 from . import (
     dirstatemap,
     encoding,
     error,
     match as matchmod,
     pathutil,
     policy,
     pycompat,
     scmutil,
     sparse,
     util,
 )

 from .interfaces import (
     dirstate as intdirstate,
     util as interfaceutil,
 )

 parsers = policy.importmod('parsers')
 rustmod = policy.importrust('dirstate')

-SUPPORTS_DIRSTATE_V2 = rustmod is not None
-
 propertycache = util.propertycache
 filecache = scmutil.filecache
 _rangemask = dirstatemap.rangemask

 DirstateItem = dirstatemap.DirstateItem


 class repocache(filecache):
     """filecache for files in .hg/"""

     def join(self, obj, fname):
         return obj._opener.join(fname)


 class rootcache(filecache):
     """filecache for files in the repository root"""

     def join(self, obj, fname):
         return obj._join(fname)


 def _getfsnow(vfs):
     '''Get "now" timestamp on filesystem'''
     tmpfd, tmpname = vfs.mkstemp()
     try:
         return os.fstat(tmpfd)[stat.ST_MTIME]
     finally:
         os.close(tmpfd)
         vfs.unlink(tmpname)


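# Note: _getfsnow() above asks the filesystem itself for "now" instead of
# trusting time.time(), since the two clocks can disagree (e.g. when the
# working copy sits on a network mount). A self-contained sketch of the same
# idea without the vfs layer (helper name and signature are ours, not
# Mercurial's):
import os
import stat
import tempfile


def getfsnow_sketch(dirpath):
    fd, name = tempfile.mkstemp(dir=dirpath)
    try:
        # mtime of a freshly created file = the filesystem's notion of now
        return os.fstat(fd)[stat.ST_MTIME]
    finally:
        os.close(fd)
        os.unlink(name)


print(getfsnow_sketch('.'))
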
 def requires_parents_change(func):
     def wrap(self, *args, **kwargs):
         if not self.pendingparentchange():
             msg = 'calling `%s` outside of a parentchange context'
             msg %= func.__name__
             raise error.ProgrammingError(msg)
         return func(self, *args, **kwargs)

     return wrap


 def requires_no_parents_change(func):
     def wrap(self, *args, **kwargs):
         if self.pendingparentchange():
             msg = 'calling `%s` inside of a parentchange context'
             msg %= func.__name__
             raise error.ProgrammingError(msg)
         return func(self, *args, **kwargs)

     return wrap


 @interfaceutil.implementer(intdirstate.idirstate)
 class dirstate(object):
     def __init__(
         self,
         opener,
         ui,
         root,
         validate,
         sparsematchfn,
         nodeconstants,
         use_dirstate_v2,
     ):
         """Create a new dirstate object.

         opener is an open()-like callable that can be used to open the
         dirstate file; root is the root of the directory tracked by
         the dirstate.
         """
         self._use_dirstate_v2 = use_dirstate_v2
         self._nodeconstants = nodeconstants
         self._opener = opener
         self._validate = validate
         self._root = root
         self._sparsematchfn = sparsematchfn
         # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
         # UNC path pointing to root share (issue4557)
         self._rootdir = pathutil.normasprefix(root)
         self._dirty = False
         self._lastnormaltime = 0
         self._ui = ui
         self._filecache = {}
         self._parentwriters = 0
         self._filename = b'dirstate'
         self._pendingfilename = b'%s.pending' % self._filename
         self._plchangecallbacks = {}
         self._origpl = None
         self._mapcls = dirstatemap.dirstatemap
         # Access and cache cwd early, so we don't access it for the first time
         # after a working-copy update caused it to not exist (accessing it then
         # raises an exception).
         self._cwd

     def prefetch_parents(self):
         """make sure the parents are loaded

         Used to avoid a race condition.
         """
         self._pl

     @contextlib.contextmanager
     def parentchange(self):
         """Context manager for handling dirstate parents.

         If an exception occurs in the scope of the context manager,
         the incoherent dirstate won't be written when wlock is
         released.
         """
         self._parentwriters += 1
         yield
         # Typically we want the "undo" step of a context manager in a
         # finally block so it happens even when an exception
         # occurs. In this case, however, we only want to decrement
         # parentwriters if the code in the with statement exits
         # normally, so we don't have a try/finally here on purpose.
         self._parentwriters -= 1

     def pendingparentchange(self):
         """Returns true if the dirstate is in the middle of a set of changes
         that modify the dirstate parent.
         """
         return self._parentwriters > 0

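# Note: parentchange() above only bumps a counter; the enforcement lives in
# setparents() further down, which raises ValueError when _parentwriters is
# zero. A toy, runnable model of that guard (not the real dirstate class):
import contextlib


class parentguard(object):
    def __init__(self):
        self._parentwriters = 0

    @contextlib.contextmanager
    def parentchange(self):
        self._parentwriters += 1
        yield
        # deliberately no try/finally: if the body raises, the counter stays
        # up and the incoherent dirstate is never written out
        self._parentwriters -= 1

    def setparents(self, p1):
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )
        return p1


d = parentguard()
with d.parentchange():
    d.setparents(b'new-p1')  # fine inside the context; raises outside
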
     @propertycache
     def _map(self):
         """Return the dirstate contents (see documentation for dirstatemap)."""
         self._map = self._mapcls(
             self._ui,
             self._opener,
             self._root,
             self._nodeconstants,
             self._use_dirstate_v2,
         )
         return self._map

     @property
     def _sparsematcher(self):
         """The matcher for the sparse checkout.

         The working directory may not include every file from a manifest. The
         matcher obtained by this property will match a path if it is to be
         included in the working directory.
         """
         # TODO there is potential to cache this property. For now, the matcher
         # is resolved on every access. (But the called function does use a
         # cache to keep the lookup fast.)
         return self._sparsematchfn()

     @repocache(b'branch')
     def _branch(self):
         try:
             return self._opener.read(b"branch").strip() or b"default"
         except IOError as inst:
             if inst.errno != errno.ENOENT:
                 raise
             return b"default"

     @property
     def _pl(self):
         return self._map.parents()

     def hasdir(self, d):
         return self._map.hastrackeddir(d)

     @rootcache(b'.hgignore')
     def _ignore(self):
         files = self._ignorefiles()
         if not files:
             return matchmod.never()

         pats = [b'include:%s' % f for f in files]
         return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)

     @propertycache
     def _slash(self):
         return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'

     @propertycache
     def _checklink(self):
         return util.checklink(self._root)

     @propertycache
     def _checkexec(self):
         return bool(util.checkexec(self._root))

     @propertycache
     def _checkcase(self):
         return not util.fscasesensitive(self._join(b'.hg'))

     def _join(self, f):
         # much faster than os.path.join()
         # it's safe because f is always a relative path
         return self._rootdir + f

     def flagfunc(self, buildfallback):
         if self._checklink and self._checkexec:

             def f(x):
                 try:
                     st = os.lstat(self._join(x))
                     if util.statislink(st):
                         return b'l'
                     if util.statisexec(st):
                         return b'x'
                 except OSError:
                     pass
                 return b''

             return f

         fallback = buildfallback()
         if self._checklink:

             def f(x):
                 if os.path.islink(self._join(x)):
                     return b'l'
                 if b'x' in fallback(x):
                     return b'x'
                 return b''

             return f
         if self._checkexec:

             def f(x):
                 if b'l' in fallback(x):
                     return b'l'
                 if util.isexec(self._join(x)):
                     return b'x'
                 return b''

             return f
         else:
             return fallback

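# Note: flagfunc()'s fast path above reads both flags from a single lstat()
# call. A self-contained sketch of that branch, inlining checks equivalent
# to util.statislink / util.statisexec (helper name is ours):
import os
import stat


def flag_of(path):
    try:
        st = os.lstat(path)
    except OSError:
        return b''
    if stat.S_ISLNK(st.st_mode):  # what util.statislink checks
        return b'l'
    if st.st_mode & 0o100:  # owner-exec bit, what util.statisexec checks
        return b'x'
    return b''


print(flag_of('/bin/sh'))  # b'x' on typical Unix systems
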
     @propertycache
     def _cwd(self):
         # internal config: ui.forcecwd
         forcecwd = self._ui.config(b'ui', b'forcecwd')
         if forcecwd:
             return forcecwd
         return encoding.getcwd()

     def getcwd(self):
         """Return the path from which a canonical path is calculated.

         This path should be used to resolve file patterns or to convert
         canonical paths back to file paths for display. It shouldn't be
         used to get real file paths. Use vfs functions instead.
         """
         cwd = self._cwd
         if cwd == self._root:
             return b''
         # self._root ends with a path separator if self._root is '/' or 'C:\'
         rootsep = self._root
         if not util.endswithsep(rootsep):
             rootsep += pycompat.ossep
         if cwd.startswith(rootsep):
             return cwd[len(rootsep) :]
         else:
             # we're outside the repo. return an absolute path.
             return cwd

     def pathto(self, f, cwd=None):
         if cwd is None:
             cwd = self.getcwd()
         path = util.pathto(self._root, cwd, f)
         if self._slash:
             return util.pconvert(path)
         return path

     def __getitem__(self, key):
         """Return the current state of key (a filename) in the dirstate.

         States are:
         n normal
         m needs merging
         r marked for removal
         a marked for addition
         ? not tracked

         XXX The "state" is a bit obscure to be in the "public" API. we should
         consider migrating all user of this to going through the dirstate entry
         instead.
         """
         msg = b"don't use dirstate[file], use dirstate.get_entry(file)"
         util.nouideprecwarn(msg, b'6.1', stacklevel=2)
         entry = self._map.get(key)
         if entry is not None:
             return entry.state
         return b'?'

     def get_entry(self, path):
         """return a DirstateItem for the associated path"""
         entry = self._map.get(path)
         if entry is None:
             return DirstateItem()
         return entry

     def __contains__(self, key):
         return key in self._map

     def __iter__(self):
         return iter(sorted(self._map))

     def items(self):
         return pycompat.iteritems(self._map)

     iteritems = items

     def parents(self):
         return [self._validate(p) for p in self._pl]

     def p1(self):
         return self._validate(self._pl[0])

     def p2(self):
         return self._validate(self._pl[1])

     @property
     def in_merge(self):
         """True if a merge is in progress"""
         return self._pl[1] != self._nodeconstants.nullid

     def branch(self):
         return encoding.tolocal(self._branch)

     def setparents(self, p1, p2=None):
         """Set dirstate parents to p1 and p2.

         When moving from two parents to one, "merged" entries a
         adjusted to normal and previous copy records discarded and
         returned by the call.

         See localrepo.setparents()
         """
         if p2 is None:
             p2 = self._nodeconstants.nullid
         if self._parentwriters == 0:
             raise ValueError(
                 b"cannot set dirstate parent outside of "
                 b"dirstate.parentchange context manager"
             )

         self._dirty = True
         oldp2 = self._pl[1]
         if self._origpl is None:
             self._origpl = self._pl
         nullid = self._nodeconstants.nullid
         # True if we need to fold p2 related state back to a linear case
         fold_p2 = oldp2 != nullid and p2 == nullid
         return self._map.setparents(p1, p2, fold_p2=fold_p2)

     def setbranch(self, branch):
         self.__class__._branch.set(self, encoding.fromlocal(branch))
         f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
         try:
             f.write(self._branch + b'\n')
             f.close()

             # make sure filecache has the correct stat info for _branch after
             # replacing the underlying file
             ce = self._filecache[b'_branch']
             if ce:
                 ce.refresh()
         except:  # re-raises
             f.discard()
             raise

     def invalidate(self):
         """Causes the next access to reread the dirstate.

         This is different from localrepo.invalidatedirstate() because it always
         rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
         check whether the dirstate has changed before rereading it."""

         for a in ("_map", "_branch", "_ignore"):
             if a in self.__dict__:
                 delattr(self, a)
         self._lastnormaltime = 0
         self._dirty = False
         self._parentwriters = 0
         self._origpl = None

     def copy(self, source, dest):
         """Mark dest as a copy of source. Unmark dest if source is None."""
         if source == dest:
             return
         self._dirty = True
         if source is not None:
             self._map.copymap[dest] = source
         else:
             self._map.copymap.pop(dest, None)

     def copied(self, file):
         return self._map.copymap.get(file, None)

     def copies(self):
         return self._map.copymap

     @requires_no_parents_change
     def set_tracked(self, filename):
         """a "public" method for generic code to mark a file as tracked

         This function is to be called outside of "update/merge" case. For
         example by a command like `hg add X`.

         return True the file was previously untracked, False otherwise.
         """
         self._dirty = True
         entry = self._map.get(filename)
         if entry is None or not entry.tracked:
             self._check_new_tracked_filename(filename)
         return self._map.set_tracked(filename)

     @requires_no_parents_change
     def set_untracked(self, filename):
         """a "public" method for generic code to mark a file as untracked

         This function is to be called outside of "update/merge" case. For
         example by a command like `hg remove X`.

         return True the file was previously tracked, False otherwise.
         """
         ret = self._map.set_untracked(filename)
         if ret:
             self._dirty = True
         return ret

     @requires_no_parents_change
     def set_clean(self, filename, parentfiledata=None):
         """record that the current state of the file on disk is known to be clean"""
         self._dirty = True
         if parentfiledata:
             (mode, size, mtime) = parentfiledata
         else:
             (mode, size, mtime) = self._get_filedata(filename)
         if not self._map[filename].tracked:
             self._check_new_tracked_filename(filename)
         self._map.set_clean(filename, mode, size, mtime)
         if mtime > self._lastnormaltime:
             # Remember the most recent modification timeslot for status(),
             # to make sure we won't miss future size-preserving file content
             # modifications that happen within the same timeslot.
             self._lastnormaltime = mtime

     @requires_no_parents_change
     def set_possibly_dirty(self, filename):
         """record that the current state of the file on disk is unknown"""
         self._dirty = True
         self._map.set_possibly_dirty(filename)

     @requires_parents_change
     def update_file_p1(
         self,
         filename,
         p1_tracked,
     ):
         """Set a file as tracked in the parent (or not)

         This is to be called when adjust the dirstate to a new parent after an history
         rewriting operation.

         It should not be called during a merge (p2 != nullid) and only within
         a `with dirstate.parentchange():` context.
         """
         if self.in_merge:
             msg = b'update_file_reference should not be called when merging'
             raise error.ProgrammingError(msg)
         entry = self._map.get(filename)
         if entry is None:
             wc_tracked = False
         else:
             wc_tracked = entry.tracked
         if not (p1_tracked or wc_tracked):
             # the file is no longer relevant to anyone
             if self._map.get(filename) is not None:
                 self._map.reset_state(filename)
                 self._dirty = True
         elif (not p1_tracked) and wc_tracked:
             if entry is not None and entry.added:
                 return  # avoid dropping copy information (maybe?)

         parentfiledata = None
         if wc_tracked and p1_tracked:
             parentfiledata = self._get_filedata(filename)

         self._map.reset_state(
             filename,
             wc_tracked,
             p1_tracked,
             # the underlying reference might have changed, we will have to
             # check it.
             has_meaningful_mtime=False,
             parentfiledata=parentfiledata,
         )
         if (
             parentfiledata is not None
             and parentfiledata[2] > self._lastnormaltime
         ):
             # Remember the most recent modification timeslot for status(),
             # to make sure we won't miss future size-preserving file content
             # modifications that happen within the same timeslot.
             self._lastnormaltime = parentfiledata[2]

     @requires_parents_change
     def update_file(
         self,
         filename,
         wc_tracked,
         p1_tracked,
         p2_info=False,
         possibly_dirty=False,
         parentfiledata=None,
     ):
         """update the information about a file in the dirstate

         This is to be called when the direstates parent changes to keep track
         of what is the file situation in regards to the working copy and its parent.

         This function must be called within a `dirstate.parentchange` context.

         note: the API is at an early stage and we might need to adjust it
         depending of what information ends up being relevant and useful to
         other processing.
         """

         # note: I do not think we need to double check name clash here since we
         # are in a update/merge case that should already have taken care of
         # this. The test agrees

         self._dirty = True

         need_parent_file_data = (
             not possibly_dirty and not p2_info and wc_tracked and p1_tracked
         )

         if need_parent_file_data and parentfiledata is None:
             parentfiledata = self._get_filedata(filename)

         self._map.reset_state(
             filename,
             wc_tracked,
             p1_tracked,
             p2_info=p2_info,
             has_meaningful_mtime=not possibly_dirty,
             parentfiledata=parentfiledata,
         )
         if (
             parentfiledata is not None
             and parentfiledata[2] > self._lastnormaltime
         ):
             # Remember the most recent modification timeslot for status(),
             # to make sure we won't miss future size-preserving file content
             # modifications that happen within the same timeslot.
             self._lastnormaltime = parentfiledata[2]

     def _check_new_tracked_filename(self, filename):
         scmutil.checkfilename(filename)
         if self._map.hastrackeddir(filename):
             msg = _(b'directory %r already in dirstate')
             msg %= pycompat.bytestr(filename)
             raise error.Abort(msg)
         # shadows
         for d in pathutil.finddirs(filename):
             if self._map.hastrackeddir(d):
                 break
             entry = self._map.get(d)
             if entry is not None and not entry.removed:
                 msg = _(b'file %r in dirstate clashes with %r')
                 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
                 raise error.Abort(msg)

     def _get_filedata(self, filename):
         """returns"""
         s = os.lstat(self._join(filename))
         mode = s.st_mode
         size = s.st_size
         mtime = s[stat.ST_MTIME]
         return (mode, size, mtime)

     def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
         if exists is None:
             exists = os.path.lexists(os.path.join(self._root, path))
         if not exists:
             # Maybe a path component exists
             if not ignoremissing and b'/' in path:
                 d, f = path.rsplit(b'/', 1)
                 d = self._normalize(d, False, ignoremissing, None)
                 folded = d + b"/" + f
             else:
                 # No path components, preserve original case
                 folded = path
         else:
             # recursively normalize leading directory components
             # against dirstate
             if b'/' in normed:
                 d, f = normed.rsplit(b'/', 1)
                 d = self._normalize(d, False, ignoremissing, True)
                 r = self._root + b"/" + d
                 folded = d + b"/" + util.fspath(f, r)
             else:
                 folded = util.fspath(normed, self._root)
             storemap[normed] = folded

         return folded

     def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
         normed = util.normcase(path)
         folded = self._map.filefoldmap.get(normed, None)
         if folded is None:
             if isknown:
                 folded = path
             else:
                 folded = self._discoverpath(
                     path, normed, ignoremissing, exists, self._map.filefoldmap
                 )
         return folded

     def _normalize(self, path, isknown, ignoremissing=False, exists=None):
         normed = util.normcase(path)
         folded = self._map.filefoldmap.get(normed, None)
         if folded is None:
             folded = self._map.dirfoldmap.get(normed, None)
         if folded is None:
             if isknown:
                 folded = path
             else:
                 # store discovered result in dirfoldmap so that future
                 # normalizefile calls don't start matching directories
                 folded = self._discoverpath(
                     path, normed, ignoremissing, exists, self._map.dirfoldmap
                 )
         return folded

     def normalize(self, path, isknown=False, ignoremissing=False):
         """
         normalize the case of a pathname when on a casefolding filesystem

         isknown specifies whether the filename came from walking the
         disk, to avoid extra filesystem access.

         If ignoremissing is True, missing path are returned
         unchanged. Otherwise, we try harder to normalize possibly
         existing path components.

         The normalized case is determined based on the following precedence:

         - version of name already stored in the dirstate
         - version of name stored on disk
         - version provided via command arguments
         """

         if self._checkcase:
             return self._normalize(path, isknown, ignoremissing)
         return path

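# Note: on case-folding filesystems, normalize() above prefers a spelling
# already recorded in the dirstate's fold maps over whatever the caller
# typed. A reduced sketch of that precedence, with plain dicts standing in
# for filefoldmap/dirfoldmap and os.path.normcase for util.normcase:
import os.path


def normalize_sketch(path, filefoldmap, dirfoldmap):
    normed = os.path.normcase(path)
    folded = filefoldmap.get(normed)
    if folded is None:
        folded = dirfoldmap.get(normed)
    return path if folded is None else folded


# On a platform where normcase() lowercases, the stored spelling wins:
print(normalize_sketch('README.TXT', {'readme.txt': 'ReadMe.txt'}, {}))
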
     def clear(self):
         self._map.clear()
         self._lastnormaltime = 0
         self._dirty = True

     def rebuild(self, parent, allfiles, changedfiles=None):
         if changedfiles is None:
             # Rebuild entire dirstate
             to_lookup = allfiles
             to_drop = []
             lastnormaltime = self._lastnormaltime
             self.clear()
             self._lastnormaltime = lastnormaltime
         elif len(changedfiles) < 10:
             # Avoid turning allfiles into a set, which can be expensive if it's
             # large.
             to_lookup = []
             to_drop = []
             for f in changedfiles:
                 if f in allfiles:
                     to_lookup.append(f)
                 else:
                     to_drop.append(f)
         else:
             changedfilesset = set(changedfiles)
             to_lookup = changedfilesset & set(allfiles)
             to_drop = changedfilesset - to_lookup

         if self._origpl is None:
             self._origpl = self._pl
         self._map.setparents(parent, self._nodeconstants.nullid)

         for f in to_lookup:

             if self.in_merge:
                 self.set_tracked(f)
             else:
                 self._map.reset_state(
                     f,
                     wc_tracked=True,
                     p1_tracked=True,
                 )
         for f in to_drop:
             self._map.reset_state(f)

         self._dirty = True

     def identity(self):
         """Return identity of dirstate itself to detect changing in storage

         If identity of previous dirstate is equal to this, writing
         changes based on the former dirstate out can keep consistency.
         """
         return self._map.identity

     def write(self, tr):
         if not self._dirty:
             return

         filename = self._filename
         if tr:
             # 'dirstate.write()' is not only for writing in-memory
             # changes out, but also for dropping ambiguous timestamp.
             # delayed writing re-raise "ambiguous timestamp issue".
             # See also the wiki page below for detail:
             # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

             # record when mtime start to be ambiguous
             now = _getfsnow(self._opener)

             # delay writing in-memory changes out
             tr.addfilegenerator(
                 b'dirstate',
                 (self._filename,),
                 lambda f: self._writedirstate(tr, f, now=now),
                 location=b'plain',
             )
             return

         st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
         self._writedirstate(tr, st)

     def addparentchangecallback(self, category, callback):
         """add a callback to be called when the wd parents are changed

         Callback will be called with the following arguments:
             dirstate, (oldp1, oldp2), (newp1, newp2)

         Category is a unique identifier to allow overwriting an old callback
         with a newer callback.
         """
         self._plchangecallbacks[category] = callback

     def _writedirstate(self, tr, st, now=None):
         # notify callbacks about parents change
         if self._origpl is not None and self._origpl != self._pl:
             for c, callback in sorted(
                 pycompat.iteritems(self._plchangecallbacks)
             ):
                 callback(self, self._origpl, self._pl)
             self._origpl = None

         if now is None:
             # use the modification time of the newly created temporary file as the
             # filesystem's notion of 'now'
             now = util.fstat(st)[stat.ST_MTIME] & _rangemask

         # enough 'delaywrite' prevents 'pack_dirstate' from dropping
         # timestamp of each entries in dirstate, because of 'now > mtime'
         delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
         if delaywrite > 0:
             # do we have any files to delay for?
             for f, e in pycompat.iteritems(self._map):
                 if e.need_delay(now):
                     import time  # to avoid useless import

                     # rather than sleep n seconds, sleep until the next
                     # multiple of n seconds
                     clock = time.time()
                     start = int(clock) - (int(clock) % delaywrite)
                     end = start + delaywrite
                     time.sleep(end - clock)
                     now = end  # trust our estimate that the end is near now
                     break

         self._map.write(tr, st, now)
         self._lastnormaltime = 0
         self._dirty = False

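# Note: the delaywrite branch in _writedirstate() above does not sleep a
# fixed duration; it sleeps to the next multiple of the configured interval
# so the recorded 'now' lands on a stable boundary. The same arithmetic,
# isolated into a runnable sketch:
import time


def sleep_to_boundary(delaywrite):
    clock = time.time()
    start = int(clock) - (int(clock) % delaywrite)
    end = start + delaywrite
    time.sleep(end - clock)
    return end  # trusted as the new 'now' by the caller


print(sleep_to_boundary(1))
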
     def _dirignore(self, f):
         if self._ignore(f):
             return True
         for p in pathutil.finddirs(f):
             if self._ignore(p):
                 return True
         return False

     def _ignorefiles(self):
         files = []
         if os.path.exists(self._join(b'.hgignore')):
             files.append(self._join(b'.hgignore'))
         for name, path in self._ui.configitems(b"ui"):
             if name == b'ignore' or name.startswith(b'ignore.'):
                 # we need to use os.path.join here rather than self._join
                 # because path is arbitrary and user-specified
                 files.append(os.path.join(self._rootdir, util.expandpath(path)))
         return files

     def _ignorefileandline(self, f):
         files = collections.deque(self._ignorefiles())
         visited = set()
         while files:
             i = files.popleft()
             patterns = matchmod.readpatternfile(
                 i, self._ui.warn, sourceinfo=True
             )
             for pattern, lineno, line in patterns:
                 kind, p = matchmod._patsplit(pattern, b'glob')
                 if kind == b"subinclude":
                     if p not in visited:
                         files.append(p)
                     continue
                 m = matchmod.match(
                     self._root, b'', [], [pattern], warn=self._ui.warn
                 )
                 if m(f):
                     return (i, lineno, line)
             visited.add(i)
         return (None, -1, b"")

     def _walkexplicit(self, match, subrepos):
         """Get stat data about the files explicitly specified by match.

         Return a triple (results, dirsfound, dirsnotfound).
         - results is a mapping from filename to stat result. It also contains
           listings mapping subrepos and .hg to None.
         - dirsfound is a list of files found to be directories.
         - dirsnotfound is a list of files that the dirstate thinks are
           directories and that were not found."""

         def badtype(mode):
             kind = _(b'unknown')
             if stat.S_ISCHR(mode):
                 kind = _(b'character device')
             elif stat.S_ISBLK(mode):
                 kind = _(b'block device')
             elif stat.S_ISFIFO(mode):
                 kind = _(b'fifo')
             elif stat.S_ISSOCK(mode):
                 kind = _(b'socket')
             elif stat.S_ISDIR(mode):
                 kind = _(b'directory')
             return _(b'unsupported file type (type is %s)') % kind

         badfn = match.bad
         dmap = self._map
         lstat = os.lstat
         getkind = stat.S_IFMT
         dirkind = stat.S_IFDIR
         regkind = stat.S_IFREG
         lnkkind = stat.S_IFLNK
         join = self._join
         dirsfound = []
         foundadd = dirsfound.append
         dirsnotfound = []
         notfoundadd = dirsnotfound.append

         if not match.isexact() and self._checkcase:
             normalize = self._normalize
         else:
             normalize = None

         files = sorted(match.files())
         subrepos.sort()
         i, j = 0, 0
         while i < len(files) and j < len(subrepos):
             subpath = subrepos[j] + b"/"
             if files[i] < subpath:
                 i += 1
                 continue
             while i < len(files) and files[i].startswith(subpath):
                 del files[i]
             j += 1

         if not files or b'' in files:
             files = [b'']
             # constructing the foldmap is expensive, so don't do it for the
             # common case where files is ['']
             normalize = None
         results = dict.fromkeys(subrepos)
         results[b'.hg'] = None

         for ff in files:
             if normalize:
                 nf = normalize(ff, False, True)
             else:
                 nf = ff
             if nf in results:
                 continue

             try:
                 st = lstat(join(nf))
                 kind = getkind(st.st_mode)
                 if kind == dirkind:
                     if nf in dmap:
                         # file replaced by dir on disk but still in dirstate
                         results[nf] = None
                     foundadd((nf, ff))
                 elif kind == regkind or kind == lnkkind:
                     results[nf] = st
                 else:
                     badfn(ff, badtype(kind))
                     if nf in dmap:
                         results[nf] = None
             except OSError as inst:  # nf not found on disk - it is dirstate only
                 if nf in dmap:  # does it exactly match a missing file?
                     results[nf] = None
                 else:  # does it match a missing directory?
                     if self._map.hasdir(nf):
                         notfoundadd(nf)
                     else:
                         badfn(ff, encoding.strtolocal(inst.strerror))

         # match.files() may contain explicitly-specified paths that shouldn't
         # be taken; drop them from the list of files found. dirsfound/notfound
         # aren't filtered here because they will be tested later.
         if match.anypats():
             for f in list(results):
                 if f == b'.hg' or f in subrepos:
                     # keep sentinel to disable further out-of-repo walks
                     continue
                 if not match(f):
                     del results[f]

         # Case insensitive filesystems cannot rely on lstat() failing to detect
         # a case-only rename. Prune the stat object for any file that does not
         # match the case in the filesystem, if there are multiple files that
         # normalize to the same path.
         if match.isexact() and self._checkcase:
             normed = {}

             for f, st in pycompat.iteritems(results):
                 if st is None:
                     continue

                 nc = util.normcase(f)
                 paths = normed.get(nc)

                 if paths is None:
                     paths = set()
                     normed[nc] = paths

                 paths.add(f)

             for norm, paths in pycompat.iteritems(normed):
                 if len(paths) > 1:
                     for path in paths:
                         folded = self._discoverpath(
                             path, norm, True, None, self._map.dirfoldmap
                         )
                         if path != folded:
                             results[path] = None

         return results, dirsfound, dirsnotfound

1007 def walk(self, match, subrepos, unknown, ignored, full=True):
1005 def walk(self, match, subrepos, unknown, ignored, full=True):
1008 """
1006 """
1009 Walk recursively through the directory tree, finding all files
1007 Walk recursively through the directory tree, finding all files
1010 matched by match.
1008 matched by match.
1011
1009
1012 If full is False, maybe skip some known-clean files.
1010 If full is False, maybe skip some known-clean files.
1013
1011
1014 Return a dict mapping filename to stat-like object (either
1012 Return a dict mapping filename to stat-like object (either
1015 mercurial.osutil.stat instance or return value of os.stat()).
1013 mercurial.osutil.stat instance or return value of os.stat()).
1016
1014
1017 """
1015 """
1018 # full is a flag that extensions that hook into walk can use -- this
1016 # full is a flag that extensions that hook into walk can use -- this
1019 # implementation doesn't use it at all. This satisfies the contract
1017 # implementation doesn't use it at all. This satisfies the contract
1020 # because we only guarantee a "maybe".
1018 # because we only guarantee a "maybe".
1021
1019
1022 if ignored:
1020 if ignored:
1023 ignore = util.never
1021 ignore = util.never
1024 dirignore = util.never
1022 dirignore = util.never
1025 elif unknown:
1023 elif unknown:
1026 ignore = self._ignore
1024 ignore = self._ignore
1027 dirignore = self._dirignore
1025 dirignore = self._dirignore
1028 else:
1026 else:
1029 # if not unknown and not ignored, drop dir recursion and step 2
1027 # if not unknown and not ignored, drop dir recursion and step 2
1030 ignore = util.always
1028 ignore = util.always
1031 dirignore = util.always
1029 dirignore = util.always
1032
1030
1033 matchfn = match.matchfn
1031 matchfn = match.matchfn
1034 matchalways = match.always()
1032 matchalways = match.always()
1035 matchtdir = match.traversedir
1033 matchtdir = match.traversedir
1036 dmap = self._map
1034 dmap = self._map
1037 listdir = util.listdir
1035 listdir = util.listdir
1038 lstat = os.lstat
1036 lstat = os.lstat
1039 dirkind = stat.S_IFDIR
1037 dirkind = stat.S_IFDIR
1040 regkind = stat.S_IFREG
1038 regkind = stat.S_IFREG
1041 lnkkind = stat.S_IFLNK
1039 lnkkind = stat.S_IFLNK
1042 join = self._join
1040 join = self._join
1043
1041
1044 exact = skipstep3 = False
1042 exact = skipstep3 = False
1045 if match.isexact(): # match.exact
1043 if match.isexact(): # match.exact
1046 exact = True
1044 exact = True
1047 dirignore = util.always # skip step 2
1045 dirignore = util.always # skip step 2
1048 elif match.prefix(): # match.match, no patterns
1046 elif match.prefix(): # match.match, no patterns
1049 skipstep3 = True
1047 skipstep3 = True
1050
1048
1051 if not exact and self._checkcase:
1049 if not exact and self._checkcase:
1052 normalize = self._normalize
1050 normalize = self._normalize
1053 normalizefile = self._normalizefile
1051 normalizefile = self._normalizefile
1054 skipstep3 = False
1052 skipstep3 = False
1055 else:
1053 else:
1056 normalize = self._normalize
1054 normalize = self._normalize
1057 normalizefile = None
1055 normalizefile = None
1058
1056
1059 # step 1: find all explicit files
1057 # step 1: find all explicit files
1060 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1058 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1061 if matchtdir:
1059 if matchtdir:
1062 for d in work:
1060 for d in work:
1063 matchtdir(d[0])
1061 matchtdir(d[0])
1064 for d in dirsnotfound:
1062 for d in dirsnotfound:
1065 matchtdir(d)
1063 matchtdir(d)
1066
1064
1067 skipstep3 = skipstep3 and not (work or dirsnotfound)
1065 skipstep3 = skipstep3 and not (work or dirsnotfound)
1068 work = [d for d in work if not dirignore(d[0])]
1066 work = [d for d in work if not dirignore(d[0])]
1069
1067
1070 # step 2: visit subdirectories
1068 # step 2: visit subdirectories
1071 def traverse(work, alreadynormed):
1069 def traverse(work, alreadynormed):
1072 wadd = work.append
1070 wadd = work.append
1073 while work:
1071 while work:
1074 tracing.counter('dirstate.walk work', len(work))
1072 tracing.counter('dirstate.walk work', len(work))
1075 nd = work.pop()
1073 nd = work.pop()
1076 visitentries = match.visitchildrenset(nd)
1074 visitentries = match.visitchildrenset(nd)
1077 if not visitentries:
1075 if not visitentries:
1078 continue
1076 continue
1079 if visitentries == b'this' or visitentries == b'all':
1077 if visitentries == b'this' or visitentries == b'all':
1080 visitentries = None
1078 visitentries = None
1081 skip = None
1079 skip = None
1082 if nd != b'':
1080 if nd != b'':
1083 skip = b'.hg'
1081 skip = b'.hg'
1084 try:
1082 try:
1085 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1083 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1086 entries = listdir(join(nd), stat=True, skip=skip)
1084 entries = listdir(join(nd), stat=True, skip=skip)
1087 except OSError as inst:
1085 except OSError as inst:
1088 if inst.errno in (errno.EACCES, errno.ENOENT):
1086 if inst.errno in (errno.EACCES, errno.ENOENT):
1089 match.bad(
1087 match.bad(
1090 self.pathto(nd), encoding.strtolocal(inst.strerror)
1088 self.pathto(nd), encoding.strtolocal(inst.strerror)
1091 )
1089 )
1092 continue
1090 continue
1093 raise
1091 raise
1094 for f, kind, st in entries:
1092 for f, kind, st in entries:
1095 # Some matchers may return files in the visitentries set,
1093 # Some matchers may return files in the visitentries set,
1096 # instead of 'this', if the matcher explicitly mentions them
1094 # instead of 'this', if the matcher explicitly mentions them
1097 # and is not an exactmatcher. This is acceptable; we do not
1095 # and is not an exactmatcher. This is acceptable; we do not
1098 # make any hard assumptions about file-or-directory below
1096 # make any hard assumptions about file-or-directory below
1099 # based on the presence of `f` in visitentries. If
1097 # based on the presence of `f` in visitentries. If
1100 # visitchildrenset returned a set, we can always skip the
1098 # visitchildrenset returned a set, we can always skip the
1101 # entries *not* in the set it provided regardless of whether
1099 # entries *not* in the set it provided regardless of whether
1102 # they're actually a file or a directory.
1100 # they're actually a file or a directory.
1103 if visitentries and f not in visitentries:
1101 if visitentries and f not in visitentries:
1104 continue
1102 continue
1105 if normalizefile:
1103 if normalizefile:
1106 # even though f might be a directory, we're only
1104 # even though f might be a directory, we're only
1107 # interested in comparing it to files currently in the
1105 # interested in comparing it to files currently in the
1108 # dmap -- therefore normalizefile is enough
1106 # dmap -- therefore normalizefile is enough
1109 nf = normalizefile(
1107 nf = normalizefile(
1110 nd and (nd + b"/" + f) or f, True, True
1108 nd and (nd + b"/" + f) or f, True, True
1111 )
1109 )
1112 else:
1110 else:
1113 nf = nd and (nd + b"/" + f) or f
1111 nf = nd and (nd + b"/" + f) or f
1114 if nf not in results:
1112 if nf not in results:
1115 if kind == dirkind:
1113 if kind == dirkind:
1116 if not ignore(nf):
1114 if not ignore(nf):
1117 if matchtdir:
1115 if matchtdir:
1118 matchtdir(nf)
1116 matchtdir(nf)
1119 wadd(nf)
1117 wadd(nf)
1120 if nf in dmap and (matchalways or matchfn(nf)):
1118 if nf in dmap and (matchalways or matchfn(nf)):
1121 results[nf] = None
1119 results[nf] = None
1122 elif kind == regkind or kind == lnkkind:
1120 elif kind == regkind or kind == lnkkind:
1123 if nf in dmap:
1121 if nf in dmap:
1124 if matchalways or matchfn(nf):
1122 if matchalways or matchfn(nf):
1125 results[nf] = st
1123 results[nf] = st
1126 elif (matchalways or matchfn(nf)) and not ignore(
1124 elif (matchalways or matchfn(nf)) and not ignore(
1127 nf
1125 nf
1128 ):
1126 ):
1129 # unknown file -- normalize if necessary
1127 # unknown file -- normalize if necessary
1130 if not alreadynormed:
1128 if not alreadynormed:
1131 nf = normalize(nf, False, True)
1129 nf = normalize(nf, False, True)
1132 results[nf] = st
1130 results[nf] = st
1133 elif nf in dmap and (matchalways or matchfn(nf)):
1131 elif nf in dmap and (matchalways or matchfn(nf)):
1134 results[nf] = None
1132 results[nf] = None
1135
1133
1136 for nd, d in work:
1134 for nd, d in work:
1137 # alreadynormed means that processwork doesn't have to do any
1135 # alreadynormed means that processwork doesn't have to do any
1138 # expensive directory normalization
1136 # expensive directory normalization
1139 alreadynormed = not normalize or nd == d
1137 alreadynormed = not normalize or nd == d
1140 traverse([d], alreadynormed)
1138 traverse([d], alreadynormed)
1141
1139
1142 for s in subrepos:
1140 for s in subrepos:
1143 del results[s]
1141 del results[s]
1144 del results[b'.hg']
1142 del results[b'.hg']
1145
1143
1146 # step 3: visit remaining files from dmap
1144 # step 3: visit remaining files from dmap
1147 if not skipstep3 and not exact:
1145 if not skipstep3 and not exact:
1148 # If a dmap file is not in results yet, it was either
1146 # If a dmap file is not in results yet, it was either
1149 # a) not matching matchfn b) ignored, c) missing, or d) under a
1147 # a) not matching matchfn b) ignored, c) missing, or d) under a
1150 # symlink directory.
1148 # symlink directory.
1151 if not results and matchalways:
1149 if not results and matchalways:
1152 visit = [f for f in dmap]
1150 visit = [f for f in dmap]
1153 else:
1151 else:
1154 visit = [f for f in dmap if f not in results and matchfn(f)]
1152 visit = [f for f in dmap if f not in results and matchfn(f)]
1155 visit.sort()
1153 visit.sort()
1156
1154
1157 if unknown:
1155 if unknown:
1158 # unknown == True means we walked all dirs under the roots
1156 # unknown == True means we walked all dirs under the roots
1159 # that weren't ignored, and everything that matched was stat'ed
1157 # that weren't ignored, and everything that matched was stat'ed
1160 # and is already in results.
1158 # and is already in results.
1161 # The rest must thus be ignored or under a symlink.
1159 # The rest must thus be ignored or under a symlink.
1162 audit_path = pathutil.pathauditor(self._root, cached=True)
1160 audit_path = pathutil.pathauditor(self._root, cached=True)
1163
1161
1164 for nf in iter(visit):
1162 for nf in iter(visit):
1165 # If a stat for the same file was already added with a
1163 # If a stat for the same file was already added with a
1166 # different case, don't add one for this, since that would
1164 # different case, don't add one for this, since that would
1167 # make it appear as if the file exists under both names
1165 # make it appear as if the file exists under both names
1168 # on disk.
1166 # on disk.
1169 if (
1167 if (
1170 normalizefile
1168 normalizefile
1171 and normalizefile(nf, True, True) in results
1169 and normalizefile(nf, True, True) in results
1172 ):
1170 ):
1173 results[nf] = None
1171 results[nf] = None
1174 # Report ignored items in the dmap as long as they are not
1172 # Report ignored items in the dmap as long as they are not
1175 # under a symlink directory.
1173 # under a symlink directory.
1176 elif audit_path.check(nf):
1174 elif audit_path.check(nf):
1177 try:
1175 try:
1178 results[nf] = lstat(join(nf))
1176 results[nf] = lstat(join(nf))
1179 # file was just ignored, no links, and exists
1177 # file was just ignored, no links, and exists
1180 except OSError:
1178 except OSError:
1181 # file doesn't exist
1179 # file doesn't exist
1182 results[nf] = None
1180 results[nf] = None
1183 else:
1181 else:
1184 # It's either missing or under a symlink directory
1182 # It's either missing or under a symlink directory
1185 # which we in this case report as missing
1183 # which we in this case report as missing
1186 results[nf] = None
1184 results[nf] = None
1187 else:
1185 else:
1188 # We may not have walked the full directory tree above,
1186 # We may not have walked the full directory tree above,
1189 # so stat and check everything we missed.
1187 # so stat and check everything we missed.
1190 iv = iter(visit)
1188 iv = iter(visit)
1191 for st in util.statfiles([join(i) for i in visit]):
1189 for st in util.statfiles([join(i) for i in visit]):
1192 results[next(iv)] = st
1190 results[next(iv)] = st
1193 return results
1191 return results
1194
1192
1195 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1193 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1196 # Force Rayon (Rust parallelism library) to respect the number of
1194 # Force Rayon (Rust parallelism library) to respect the number of
1197 # workers. This is a temporary workaround until Rust code knows
1195 # workers. This is a temporary workaround until Rust code knows
1198 # how to read the config file.
1196 # how to read the config file.
1199 numcpus = self._ui.configint(b"worker", b"numcpus")
1197 numcpus = self._ui.configint(b"worker", b"numcpus")
1200 if numcpus is not None:
1198 if numcpus is not None:
1201 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1199 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1202
1200
1203 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1201 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1204 if not workers_enabled:
1202 if not workers_enabled:
1205 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1203 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1206
1204
1207 (
1205 (
1208 lookup,
1206 lookup,
1209 modified,
1207 modified,
1210 added,
1208 added,
1211 removed,
1209 removed,
1212 deleted,
1210 deleted,
1213 clean,
1211 clean,
1214 ignored,
1212 ignored,
1215 unknown,
1213 unknown,
1216 warnings,
1214 warnings,
1217 bad,
1215 bad,
1218 traversed,
1216 traversed,
1219 dirty,
1217 dirty,
1220 ) = rustmod.status(
1218 ) = rustmod.status(
1221 self._map._map,
1219 self._map._map,
1222 matcher,
1220 matcher,
1223 self._rootdir,
1221 self._rootdir,
1224 self._ignorefiles(),
1222 self._ignorefiles(),
1225 self._checkexec,
1223 self._checkexec,
1226 self._lastnormaltime,
1224 self._lastnormaltime,
1227 bool(list_clean),
1225 bool(list_clean),
1228 bool(list_ignored),
1226 bool(list_ignored),
1229 bool(list_unknown),
1227 bool(list_unknown),
1230 bool(matcher.traversedir),
1228 bool(matcher.traversedir),
1231 )
1229 )
1232
1230
1233 self._dirty |= dirty
1231 self._dirty |= dirty
1234
1232
1235 if matcher.traversedir:
1233 if matcher.traversedir:
1236 for dir in traversed:
1234 for dir in traversed:
1237 matcher.traversedir(dir)
1235 matcher.traversedir(dir)
1238
1236
1239 if self._ui.warn:
1237 if self._ui.warn:
1240 for item in warnings:
1238 for item in warnings:
1241 if isinstance(item, tuple):
1239 if isinstance(item, tuple):
1242 file_path, syntax = item
1240 file_path, syntax = item
1243 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1241 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1244 file_path,
1242 file_path,
1245 syntax,
1243 syntax,
1246 )
1244 )
1247 self._ui.warn(msg)
1245 self._ui.warn(msg)
1248 else:
1246 else:
1249 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1247 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1250 self._ui.warn(
1248 self._ui.warn(
1251 msg
1249 msg
1252 % (
1250 % (
1253 pathutil.canonpath(
1251 pathutil.canonpath(
1254 self._rootdir, self._rootdir, item
1252 self._rootdir, self._rootdir, item
1255 ),
1253 ),
1256 b"No such file or directory",
1254 b"No such file or directory",
1257 )
1255 )
1258 )
1256 )
1259
1257
1260 for (fn, message) in bad:
1258 for (fn, message) in bad:
1261 matcher.bad(fn, encoding.strtolocal(message))
1259 matcher.bad(fn, encoding.strtolocal(message))
1262
1260
1263 status = scmutil.status(
1261 status = scmutil.status(
1264 modified=modified,
1262 modified=modified,
1265 added=added,
1263 added=added,
1266 removed=removed,
1264 removed=removed,
1267 deleted=deleted,
1265 deleted=deleted,
1268 unknown=unknown,
1266 unknown=unknown,
1269 ignored=ignored,
1267 ignored=ignored,
1270 clean=clean,
1268 clean=clean,
1271 )
1269 )
1272 return (lookup, status)
1270 return (lookup, status)
1273
1271
1274 def status(self, match, subrepos, ignored, clean, unknown):
1272 def status(self, match, subrepos, ignored, clean, unknown):
1275 """Determine the status of the working copy relative to the
1273 """Determine the status of the working copy relative to the
1276 dirstate and return a pair of (unsure, status), where status is of type
1274 dirstate and return a pair of (unsure, status), where status is of type
1277 scmutil.status and:
1275 scmutil.status and:
1278
1276
1279 unsure:
1277 unsure:
1280 files that might have been modified since the dirstate was
1278 files that might have been modified since the dirstate was
1281 written, but need to be read to be sure (size is the same
1279 written, but need to be read to be sure (size is the same
1282 but mtime differs)
1280 but mtime differs)
1283 status.modified:
1281 status.modified:
1284 files that have definitely been modified since the dirstate
1282 files that have definitely been modified since the dirstate
1285 was written (different size or mode)
1283 was written (different size or mode)
1286 status.clean:
1284 status.clean:
1287 files that have definitely not been modified since the
1285 files that have definitely not been modified since the
1288 dirstate was written
1286 dirstate was written
1289 """
1287 """
1290 listignored, listclean, listunknown = ignored, clean, unknown
1288 listignored, listclean, listunknown = ignored, clean, unknown
1291 lookup, modified, added, unknown, ignored = [], [], [], [], []
1289 lookup, modified, added, unknown, ignored = [], [], [], [], []
1292 removed, deleted, clean = [], [], []
1290 removed, deleted, clean = [], [], []
1293
1291
1294 dmap = self._map
1292 dmap = self._map
1295 dmap.preload()
1293 dmap.preload()
1296
1294
1297 use_rust = True
1295 use_rust = True
1298
1296
1299 allowed_matchers = (
1297 allowed_matchers = (
1300 matchmod.alwaysmatcher,
1298 matchmod.alwaysmatcher,
1301 matchmod.exactmatcher,
1299 matchmod.exactmatcher,
1302 matchmod.includematcher,
1300 matchmod.includematcher,
1303 )
1301 )
1304
1302
1305 if rustmod is None:
1303 if rustmod is None:
1306 use_rust = False
1304 use_rust = False
1307 elif self._checkcase:
1305 elif self._checkcase:
1308 # Case-insensitive filesystems are not handled yet
1306 # Case-insensitive filesystems are not handled yet
1309 use_rust = False
1307 use_rust = False
1310 elif subrepos:
1308 elif subrepos:
1311 use_rust = False
1309 use_rust = False
1312 elif sparse.enabled:
1310 elif sparse.enabled:
1313 use_rust = False
1311 use_rust = False
1314 elif not isinstance(match, allowed_matchers):
1312 elif not isinstance(match, allowed_matchers):
1315 # Some matchers have yet to be implemented
1313 # Some matchers have yet to be implemented
1316 use_rust = False
1314 use_rust = False
1317
1315
1318 if use_rust:
1316 if use_rust:
1319 try:
1317 try:
1320 return self._rust_status(
1318 return self._rust_status(
1321 match, listclean, listignored, listunknown
1319 match, listclean, listignored, listunknown
1322 )
1320 )
1323 except rustmod.FallbackError:
1321 except rustmod.FallbackError:
1324 pass
1322 pass
1325
1323
1326 def noop(f):
1324 def noop(f):
1327 pass
1325 pass
1328
1326
1329 dcontains = dmap.__contains__
1327 dcontains = dmap.__contains__
1330 dget = dmap.__getitem__
1328 dget = dmap.__getitem__
1331 ladd = lookup.append # aka "unsure"
1329 ladd = lookup.append # aka "unsure"
1332 madd = modified.append
1330 madd = modified.append
1333 aadd = added.append
1331 aadd = added.append
1334 uadd = unknown.append if listunknown else noop
1332 uadd = unknown.append if listunknown else noop
1335 iadd = ignored.append if listignored else noop
1333 iadd = ignored.append if listignored else noop
1336 radd = removed.append
1334 radd = removed.append
1337 dadd = deleted.append
1335 dadd = deleted.append
1338 cadd = clean.append if listclean else noop
1336 cadd = clean.append if listclean else noop
1339 mexact = match.exact
1337 mexact = match.exact
1340 dirignore = self._dirignore
1338 dirignore = self._dirignore
1341 checkexec = self._checkexec
1339 checkexec = self._checkexec
1342 copymap = self._map.copymap
1340 copymap = self._map.copymap
1343 lastnormaltime = self._lastnormaltime
1341 lastnormaltime = self._lastnormaltime
1344
1342
1345 # We need to do full walks when either
1343 # We need to do full walks when either
1346 # - we're listing all clean files, or
1344 # - we're listing all clean files, or
1347 # - match.traversedir does something, because match.traversedir should
1345 # - match.traversedir does something, because match.traversedir should
1348 # be called for every dir in the working dir
1346 # be called for every dir in the working dir
1349 full = listclean or match.traversedir is not None
1347 full = listclean or match.traversedir is not None
1350 for fn, st in pycompat.iteritems(
1348 for fn, st in pycompat.iteritems(
1351 self.walk(match, subrepos, listunknown, listignored, full=full)
1349 self.walk(match, subrepos, listunknown, listignored, full=full)
1352 ):
1350 ):
1353 if not dcontains(fn):
1351 if not dcontains(fn):
1354 if (listignored or mexact(fn)) and dirignore(fn):
1352 if (listignored or mexact(fn)) and dirignore(fn):
1355 if listignored:
1353 if listignored:
1356 iadd(fn)
1354 iadd(fn)
1357 else:
1355 else:
1358 uadd(fn)
1356 uadd(fn)
1359 continue
1357 continue
1360
1358
1361 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1359 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1362 # written like that for performance reasons. dmap[fn] is not a
1360 # written like that for performance reasons. dmap[fn] is not a
1363 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1361 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1364 # opcode has fast paths when the value to be unpacked is a tuple or
1362 # opcode has fast paths when the value to be unpacked is a tuple or
1365 # a list, but falls back to creating a full-fledged iterator in
1363 # a list, but falls back to creating a full-fledged iterator in
1366 # general. That is much slower than simply accessing and storing the
1364 # general. That is much slower than simply accessing and storing the
1367 # tuple members one by one.
1365 # tuple members one by one.
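# In other words, the fast form used below is (illustration only):
#   t = dget(fn)
#   mode = t.mode   # three plain attribute loads ...
#   size = t.size
#   time = t.mtime  # ... instead of unpacking dmap[fn] in one statement,
# which would take the slower generic-iterator path for non-tuple types.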
1368 t = dget(fn)
1366 t = dget(fn)
1369 mode = t.mode
1367 mode = t.mode
1370 size = t.size
1368 size = t.size
1371 time = t.mtime
1369 time = t.mtime
1372
1370
1373 if not st and t.tracked:
1371 if not st and t.tracked:
1374 dadd(fn)
1372 dadd(fn)
1375 elif t.p2_info:
1373 elif t.p2_info:
1376 madd(fn)
1374 madd(fn)
1377 elif t.added:
1375 elif t.added:
1378 aadd(fn)
1376 aadd(fn)
1379 elif t.removed:
1377 elif t.removed:
1380 radd(fn)
1378 radd(fn)
1381 elif t.tracked:
1379 elif t.tracked:
1382 if (
1380 if (
1383 size >= 0
1381 size >= 0
1384 and (
1382 and (
1385 (size != st.st_size and size != st.st_size & _rangemask)
1383 (size != st.st_size and size != st.st_size & _rangemask)
1386 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1384 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1387 )
1385 )
1388 or fn in copymap
1386 or fn in copymap
1389 ):
1387 ):
1390 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1388 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1391 # issue6456: Size returned may be longer due to
1389 # issue6456: Size returned may be longer due to
1392 # encryption on EXT-4 fscrypt, undecided.
1390 # encryption on EXT-4 fscrypt, undecided.
1393 ladd(fn)
1391 ladd(fn)
1394 else:
1392 else:
1395 madd(fn)
1393 madd(fn)
1396 elif (
1394 elif (
1397 time != st[stat.ST_MTIME]
1395 time != st[stat.ST_MTIME]
1398 and time != st[stat.ST_MTIME] & _rangemask
1396 and time != st[stat.ST_MTIME] & _rangemask
1399 ):
1397 ):
1400 ladd(fn)
1398 ladd(fn)
1401 elif st[stat.ST_MTIME] == lastnormaltime:
1399 elif st[stat.ST_MTIME] == lastnormaltime:
1402 # fn may have just been marked as normal and it may have
1400 # fn may have just been marked as normal and it may have
1403 # changed in the same second without changing its size.
1401 # changed in the same second without changing its size.
1404 # This can happen if we quickly do multiple commits.
1402 # This can happen if we quickly do multiple commits.
1405 # Force lookup, so we don't miss such a racy file change.
1403 # Force lookup, so we don't miss such a racy file change.
1406 ladd(fn)
1404 ladd(fn)
1407 elif listclean:
1405 elif listclean:
1408 cadd(fn)
1406 cadd(fn)
1409 status = scmutil.status(
1407 status = scmutil.status(
1410 modified, added, removed, deleted, unknown, ignored, clean
1408 modified, added, removed, deleted, unknown, ignored, clean
1411 )
1409 )
1412 return (lookup, status)
1410 return (lookup, status)
1413
1411
1414 def matches(self, match):
1412 def matches(self, match):
1415 """
1413 """
1416 return files in the dirstate (in whatever state) filtered by match
1414 return files in the dirstate (in whatever state) filtered by match
1417 """
1415 """
1418 dmap = self._map
1416 dmap = self._map
1419 if rustmod is not None:
1417 if rustmod is not None:
1420 dmap = self._map._map
1418 dmap = self._map._map
1421
1419
1422 if match.always():
1420 if match.always():
1423 return dmap.keys()
1421 return dmap.keys()
1424 files = match.files()
1422 files = match.files()
1425 if match.isexact():
1423 if match.isexact():
1426 # fast path -- filter the other way around, since typically files is
1424 # fast path -- filter the other way around, since typically files is
1427 # much smaller than dmap
1425 # much smaller than dmap
1428 return [f for f in files if f in dmap]
1426 return [f for f in files if f in dmap]
1429 if match.prefix() and all(fn in dmap for fn in files):
1427 if match.prefix() and all(fn in dmap for fn in files):
1430 # fast path -- all the values are known to be files, so just return
1428 # fast path -- all the values are known to be files, so just return
1431 # that
1429 # that
1432 return list(files)
1430 return list(files)
1433 return [f for f in dmap if match(f)]
1431 return [f for f in dmap if match(f)]
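# Hypothetical usage (matcher construction assumed): an always-matcher
# returns every path known to the dirstate, whatever its state:
#
#   all_paths = repo.dirstate.matches(matchmod.always())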
1434
1432
1435 def _actualfilename(self, tr):
1433 def _actualfilename(self, tr):
1436 if tr:
1434 if tr:
1437 return self._pendingfilename
1435 return self._pendingfilename
1438 else:
1436 else:
1439 return self._filename
1437 return self._filename
1440
1438
1441 def savebackup(self, tr, backupname):
1439 def savebackup(self, tr, backupname):
1442 '''Save current dirstate into backup file'''
1440 '''Save current dirstate into backup file'''
1443 filename = self._actualfilename(tr)
1441 filename = self._actualfilename(tr)
1444 assert backupname != filename
1442 assert backupname != filename
1445
1443
1446 # use '_writedirstate' instead of 'write' to make certain that changes are
1444 # use '_writedirstate' instead of 'write' to make certain that changes are
1447 # written out, because the latter skips writing while a transaction is running.
1445 # written out, because the latter skips writing while a transaction is running.
1448 # The output file will be used to create a backup of the dirstate at this point.
1446 # The output file will be used to create a backup of the dirstate at this point.
1449 if self._dirty or not self._opener.exists(filename):
1447 if self._dirty or not self._opener.exists(filename):
1450 self._writedirstate(
1448 self._writedirstate(
1451 tr,
1449 tr,
1452 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1450 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1453 )
1451 )
1454
1452
1455 if tr:
1453 if tr:
1456 # ensure that subsequent tr.writepending returns True for
1454 # ensure that subsequent tr.writepending returns True for
1457 # changes written out above, even if dirstate is never
1455 # changes written out above, even if dirstate is never
1458 # changed after this
1456 # changed after this
1459 tr.addfilegenerator(
1457 tr.addfilegenerator(
1460 b'dirstate',
1458 b'dirstate',
1461 (self._filename,),
1459 (self._filename,),
1462 lambda f: self._writedirstate(tr, f),
1460 lambda f: self._writedirstate(tr, f),
1463 location=b'plain',
1461 location=b'plain',
1464 )
1462 )
1465
1463
1466 # ensure that pending file written above is unlinked at
1464 # ensure that pending file written above is unlinked at
1467 # failure, even if tr.writepending isn't invoked until the
1465 # failure, even if tr.writepending isn't invoked until the
1468 # end of this transaction
1466 # end of this transaction
1469 tr.registertmp(filename, location=b'plain')
1467 tr.registertmp(filename, location=b'plain')
1470
1468
1471 self._opener.tryunlink(backupname)
1469 self._opener.tryunlink(backupname)
1472 # hardlink backup is okay because _writedirstate is always called
1470 # hardlink backup is okay because _writedirstate is always called
1473 # with an "atomictemp=True" file.
1471 # with an "atomictemp=True" file.
1474 util.copyfile(
1472 util.copyfile(
1475 self._opener.join(filename),
1473 self._opener.join(filename),
1476 self._opener.join(backupname),
1474 self._opener.join(backupname),
1477 hardlink=True,
1475 hardlink=True,
1478 )
1476 )
1479
1477
1480 def restorebackup(self, tr, backupname):
1478 def restorebackup(self, tr, backupname):
1481 '''Restore dirstate by backup file'''
1479 '''Restore dirstate by backup file'''
1482 # this "invalidate()" prevents "wlock.release()" from writing
1480 # this "invalidate()" prevents "wlock.release()" from writing
1483 # changes of dirstate out after restoring from backup file
1481 # changes of dirstate out after restoring from backup file
1484 self.invalidate()
1482 self.invalidate()
1485 filename = self._actualfilename(tr)
1483 filename = self._actualfilename(tr)
1486 o = self._opener
1484 o = self._opener
1487 if util.samefile(o.join(backupname), o.join(filename)):
1485 if util.samefile(o.join(backupname), o.join(filename)):
1488 o.unlink(backupname)
1486 o.unlink(backupname)
1489 else:
1487 else:
1490 o.rename(backupname, filename, checkambig=True)
1488 o.rename(backupname, filename, checkambig=True)
1491
1489
1492 def clearbackup(self, tr, backupname):
1490 def clearbackup(self, tr, backupname):
1493 '''Clear backup file'''
1491 '''Clear backup file'''
1494 self._opener.unlink(backupname)
1492 self._opener.unlink(backupname)
1495
1493
1496 def verify(self, m1, m2):
1494 def verify(self, m1, m2):
1497 """check the dirstate content again the parent manifest and yield errors"""
1495 """check the dirstate content again the parent manifest and yield errors"""
1498 missing_from_p1 = b"%s in state %s, but not in manifest1\n"
1496 missing_from_p1 = b"%s in state %s, but not in manifest1\n"
1499 unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
1497 unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
1500 missing_from_ps = b"%s in state %s, but not in either manifest\n"
1498 missing_from_ps = b"%s in state %s, but not in either manifest\n"
1501 missing_from_ds = b"%s in manifest1, but listed as state %s\n"
1499 missing_from_ds = b"%s in manifest1, but listed as state %s\n"
1502 for f, entry in self.items():
1500 for f, entry in self.items():
1503 state = entry.state
1501 state = entry.state
1504 if state in b"nr" and f not in m1:
1502 if state in b"nr" and f not in m1:
1505 yield (missing_from_p1, f, state)
1503 yield (missing_from_p1, f, state)
1506 if state in b"a" and f in m1:
1504 if state in b"a" and f in m1:
1507 yield (unexpected_in_p1, f, state)
1505 yield (unexpected_in_p1, f, state)
1508 if state in b"m" and f not in m1 and f not in m2:
1506 if state in b"m" and f not in m1 and f not in m2:
1509 yield (missing_from_ps, f, state)
1507 yield (missing_from_ps, f, state)
1510 for f in m1:
1508 for f in m1:
1511 state = self.get_entry(f).state
1509 state = self.get_entry(f).state
1512 if state not in b"nrm":
1510 if state not in b"nrm":
1513 yield (missing_from_ds, f, state)
1511 yield (missing_from_ds, f, state)
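# The yielded triples are (message template, filename, state) and are meant
# to be rendered by the caller, e.g. (hypothetical consumer):
#
#   for msg, f, s in repo.dirstate.verify(m1, m2):
#       ui.warn(msg % (f, s))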
@@ -1,732 +1,733 b''
1 # dirstatemap.py
1 # dirstatemap.py
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 from __future__ import absolute_import
6 from __future__ import absolute_import
7
7
8 import errno
8 import errno
9
9
10 from .i18n import _
10 from .i18n import _
11
11
12 from . import (
12 from . import (
13 error,
13 error,
14 pathutil,
14 pathutil,
15 policy,
15 policy,
16 pycompat,
16 pycompat,
17 txnutil,
17 txnutil,
18 util,
18 util,
19 )
19 )
20
20
21 from .dirstateutils import (
21 from .dirstateutils import (
22 docket as docketmod,
22 docket as docketmod,
23 v2,
23 v2,
24 )
24 )
25
25
26 parsers = policy.importmod('parsers')
26 parsers = policy.importmod('parsers')
27 rustmod = policy.importrust('dirstate')
27 rustmod = policy.importrust('dirstate')
28
28
29 propertycache = util.propertycache
29 propertycache = util.propertycache
30
30
31 if rustmod is None:
31 if rustmod is None:
32 DirstateItem = parsers.DirstateItem
32 DirstateItem = parsers.DirstateItem
33 else:
33 else:
34 DirstateItem = rustmod.DirstateItem
34 DirstateItem = rustmod.DirstateItem
35
35
36 rangemask = 0x7FFFFFFF
36 rangemask = 0x7FFFFFFF
37
37
38
38
39 class _dirstatemapcommon(object):
39 class _dirstatemapcommon(object):
40 """
40 """
41 Methods that are identical for both implementations of the dirstatemap
41 Methods that are identical for both implementations of the dirstatemap
42 class, with and without Rust extensions enabled.
42 class, with and without Rust extensions enabled.
43 """
43 """
44
44
45 # please pytype
45 # please pytype
46
46
47 _map = None
47 _map = None
48 copymap = None
48 copymap = None
49
49
50 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
50 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
51 self._use_dirstate_v2 = use_dirstate_v2
51 self._use_dirstate_v2 = use_dirstate_v2
52 self._nodeconstants = nodeconstants
52 self._nodeconstants = nodeconstants
53 self._ui = ui
53 self._ui = ui
54 self._opener = opener
54 self._opener = opener
55 self._root = root
55 self._root = root
56 self._filename = b'dirstate'
56 self._filename = b'dirstate'
57 self._nodelen = 20 # Also update Rust code when changing this!
57 self._nodelen = 20 # Also update Rust code when changing this!
58 self._parents = None
58 self._parents = None
59 self._dirtyparents = False
59 self._dirtyparents = False
60 self._docket = None
60 self._docket = None
61
61
62 # for consistent view between _pl() and _read() invocations
62 # for consistent view between _pl() and _read() invocations
63 self._pendingmode = None
63 self._pendingmode = None
64
64
65 def preload(self):
65 def preload(self):
66 """Loads the underlying data, if it's not already loaded"""
66 """Loads the underlying data, if it's not already loaded"""
67 self._map
67 self._map
68
68
69 def get(self, key, default=None):
69 def get(self, key, default=None):
70 return self._map.get(key, default)
70 return self._map.get(key, default)
71
71
72 def __len__(self):
72 def __len__(self):
73 return len(self._map)
73 return len(self._map)
74
74
75 def __iter__(self):
75 def __iter__(self):
76 return iter(self._map)
76 return iter(self._map)
77
77
78 def __contains__(self, key):
78 def __contains__(self, key):
79 return key in self._map
79 return key in self._map
80
80
81 def __getitem__(self, item):
81 def __getitem__(self, item):
82 return self._map[item]
82 return self._map[item]
83
83
84 ### sub-class utility method
84 ### sub-class utility method
85 #
85 #
86 # Used to allow generic implementations of some methods while still coping
86 # Used to allow generic implementations of some methods while still coping
87 # with minor differences between implementations.
87 # with minor differences between implementations.
88
88
89 def _dirs_incr(self, filename, old_entry=None):
89 def _dirs_incr(self, filename, old_entry=None):
90 """incremente the dirstate counter if applicable
90 """incremente the dirstate counter if applicable
91
91
92 This might be a no-op for some subclasses that deal with directory
92 This might be a no-op for some subclasses that deal with directory
93 tracking in a different way.
93 tracking in a different way.
94 """
94 """
95
95
96 def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
96 def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
97 """decremente the dirstate counter if applicable
97 """decremente the dirstate counter if applicable
98
98
99 This might be a no-op for some subclasses that deal with directory
99 This might be a no-op for some subclasses that deal with directory
100 tracking in a different way.
100 tracking in a different way.
101 """
101 """
102
102
103 def _refresh_entry(self, f, entry):
103 def _refresh_entry(self, f, entry):
104 """record updated state of an entry"""
104 """record updated state of an entry"""
105
105
106 def _insert_entry(self, f, entry):
106 def _insert_entry(self, f, entry):
107 """add a new dirstate entry (or replace an unrelated one)
107 """add a new dirstate entry (or replace an unrelated one)
108
108
109 The fact that it is actually new is the responsibility of the caller
109 The fact that it is actually new is the responsibility of the caller
110 """
110 """
111
111
112 def _drop_entry(self, f):
112 def _drop_entry(self, f):
113 """remove any entry for file f
113 """remove any entry for file f
114
114
115 This should also drop associated copy information
115 This should also drop associated copy information
116
116
117 The fact that we actually need to drop it is the responsibility of the caller"""
117 The fact that we actually need to drop it is the responsibility of the caller"""
118
118
119 ### method to manipulate the entries
119 ### method to manipulate the entries
120
120
121 def set_possibly_dirty(self, filename):
121 def set_possibly_dirty(self, filename):
122 """record that the current state of the file on disk is unknown"""
122 """record that the current state of the file on disk is unknown"""
123 entry = self[filename]
123 entry = self[filename]
124 entry.set_possibly_dirty()
124 entry.set_possibly_dirty()
125 self._refresh_entry(filename, entry)
125 self._refresh_entry(filename, entry)
126
126
127 def set_clean(self, filename, mode, size, mtime):
127 def set_clean(self, filename, mode, size, mtime):
128 """mark a file as back to a clean state"""
128 """mark a file as back to a clean state"""
129 entry = self[filename]
129 entry = self[filename]
130 mtime = mtime & rangemask
130 mtime = mtime & rangemask
131 size = size & rangemask
131 size = size & rangemask
132 entry.set_clean(mode, size, mtime)
132 entry.set_clean(mode, size, mtime)
133 self._refresh_entry(filename, entry)
133 self._refresh_entry(filename, entry)
134 self.copymap.pop(filename, None)
134 self.copymap.pop(filename, None)
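# Sketch of the rangemask folding used above: rangemask is 0x7FFFFFFF, so
# sizes and mtimes are truncated to 31 bits before being stored, e.g.:
#
#   >>> (2 ** 32 + 5) & 0x7FFFFFFF
#   5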
135
135
136 def set_tracked(self, filename):
136 def set_tracked(self, filename):
137 new = False
137 new = False
138 entry = self.get(filename)
138 entry = self.get(filename)
139 if entry is None:
139 if entry is None:
140 self._dirs_incr(filename)
140 self._dirs_incr(filename)
141 entry = DirstateItem(
141 entry = DirstateItem(
142 wc_tracked=True,
142 wc_tracked=True,
143 )
143 )
144
144
145 self._insert_entry(filename, entry)
145 self._insert_entry(filename, entry)
146 new = True
146 new = True
147 elif not entry.tracked:
147 elif not entry.tracked:
148 self._dirs_incr(filename, entry)
148 self._dirs_incr(filename, entry)
149 entry.set_tracked()
149 entry.set_tracked()
150 self._refresh_entry(filename, entry)
150 self._refresh_entry(filename, entry)
151 new = True
151 new = True
152 else:
152 else:
153 # XXX This is probably overkill for most cases, but we need this to
153 # XXX This is probably overkill for most cases, but we need this to
154 # fully replace the `normallookup` call with the `set_tracked` one.
154 # fully replace the `normallookup` call with the `set_tracked` one.
155 # Consider smoothing this in the future.
155 # Consider smoothing this in the future.
156 entry.set_possibly_dirty()
156 entry.set_possibly_dirty()
157 self._refresh_entry(filename, entry)
157 self._refresh_entry(filename, entry)
158 return new
158 return new
159
159
160 def set_untracked(self, f):
160 def set_untracked(self, f):
161 """Mark a file as no longer tracked in the dirstate map"""
161 """Mark a file as no longer tracked in the dirstate map"""
162 entry = self.get(f)
162 entry = self.get(f)
163 if entry is None:
163 if entry is None:
164 return False
164 return False
165 else:
165 else:
166 self._dirs_decr(f, old_entry=entry, remove_variant=not entry.added)
166 self._dirs_decr(f, old_entry=entry, remove_variant=not entry.added)
167 if not entry.p2_info:
167 if not entry.p2_info:
168 self.copymap.pop(f, None)
168 self.copymap.pop(f, None)
169 entry.set_untracked()
169 entry.set_untracked()
170 self._refresh_entry(f, entry)
170 self._refresh_entry(f, entry)
171 return True
171 return True
172
172
173 def reset_state(
173 def reset_state(
174 self,
174 self,
175 filename,
175 filename,
176 wc_tracked=False,
176 wc_tracked=False,
177 p1_tracked=False,
177 p1_tracked=False,
178 p2_info=False,
178 p2_info=False,
179 has_meaningful_mtime=True,
179 has_meaningful_mtime=True,
180 has_meaningful_data=True,
180 has_meaningful_data=True,
181 parentfiledata=None,
181 parentfiledata=None,
182 ):
182 ):
183 """Set a entry to a given state, diregarding all previous state
183 """Set a entry to a given state, diregarding all previous state
184
184
185 This is to be used by the part of the dirstate API dedicated to
185 This is to be used by the part of the dirstate API dedicated to
186 adjusting the dirstate after an update/merge.
186 adjusting the dirstate after an update/merge.
187
187
188 note: calling this might result in no entry existing at all if the
188 note: calling this might result in no entry existing at all if the
189 dirstate map does not see any point in having one for this file
189 dirstate map does not see any point in having one for this file
190 anymore.
190 anymore.
191 """
191 """
192 # copy information is now outdated
192 # copy information is now outdated
193 # (maybe new information should be directly passed to this function)
193 # (maybe new information should be directly passed to this function)
194 self.copymap.pop(filename, None)
194 self.copymap.pop(filename, None)
195
195
196 if not (p1_tracked or p2_info or wc_tracked):
196 if not (p1_tracked or p2_info or wc_tracked):
197 old_entry = self._map.get(filename)
197 old_entry = self._map.get(filename)
198 self._drop_entry(filename)
198 self._drop_entry(filename)
199 self._dirs_decr(filename, old_entry=old_entry)
199 self._dirs_decr(filename, old_entry=old_entry)
200 return
200 return
201
201
202 old_entry = self._map.get(filename)
202 old_entry = self._map.get(filename)
203 self._dirs_incr(filename, old_entry)
203 self._dirs_incr(filename, old_entry)
204 entry = DirstateItem(
204 entry = DirstateItem(
205 wc_tracked=wc_tracked,
205 wc_tracked=wc_tracked,
206 p1_tracked=p1_tracked,
206 p1_tracked=p1_tracked,
207 p2_info=p2_info,
207 p2_info=p2_info,
208 has_meaningful_mtime=has_meaningful_mtime,
208 has_meaningful_mtime=has_meaningful_mtime,
209 parentfiledata=parentfiledata,
209 parentfiledata=parentfiledata,
210 )
210 )
211 self._insert_entry(filename, entry)
211 self._insert_entry(filename, entry)
212
212
213 ### disk interaction
213 ### disk interaction
214
214
215 def _opendirstatefile(self):
215 def _opendirstatefile(self):
216 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
216 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
217 if self._pendingmode is not None and self._pendingmode != mode:
217 if self._pendingmode is not None and self._pendingmode != mode:
218 fp.close()
218 fp.close()
219 raise error.Abort(
219 raise error.Abort(
220 _(b'working directory state may be changed in parallel')
220 _(b'working directory state may be changed in parallel')
221 )
221 )
222 self._pendingmode = mode
222 self._pendingmode = mode
223 return fp
223 return fp
224
224
225 def _readdirstatefile(self, size=-1):
225 def _readdirstatefile(self, size=-1):
226 try:
226 try:
227 with self._opendirstatefile() as fp:
227 with self._opendirstatefile() as fp:
228 return fp.read(size)
228 return fp.read(size)
229 except IOError as err:
229 except IOError as err:
230 if err.errno != errno.ENOENT:
230 if err.errno != errno.ENOENT:
231 raise
231 raise
232 # File doesn't exist, so the current state is empty
232 # File doesn't exist, so the current state is empty
233 return b''
233 return b''
234
234
235 @property
235 @property
236 def docket(self):
236 def docket(self):
237 if not self._docket:
237 if not self._docket:
238 if not self._use_dirstate_v2:
238 if not self._use_dirstate_v2:
239 raise error.ProgrammingError(
239 raise error.ProgrammingError(
240 b'dirstate only has a docket in v2 format'
240 b'dirstate only has a docket in v2 format'
241 )
241 )
242 self._docket = docketmod.DirstateDocket.parse(
242 self._docket = docketmod.DirstateDocket.parse(
243 self._readdirstatefile(), self._nodeconstants
243 self._readdirstatefile(), self._nodeconstants
244 )
244 )
245 return self._docket
245 return self._docket
246
246
247 def write_v2_no_append(self, tr, st, meta, packed):
247 def write_v2_no_append(self, tr, st, meta, packed):
248 old_docket = self.docket
248 old_docket = self.docket
249 new_docket = docketmod.DirstateDocket.with_new_uuid(
249 new_docket = docketmod.DirstateDocket.with_new_uuid(
250 self.parents(), len(packed), meta
250 self.parents(), len(packed), meta
251 )
251 )
252 data_filename = new_docket.data_filename()
252 data_filename = new_docket.data_filename()
253 if tr:
253 if tr:
254 tr.add(data_filename, 0)
254 tr.add(data_filename, 0)
255 self._opener.write(data_filename, packed)
255 self._opener.write(data_filename, packed)
256 # Write the new docket after the new data file has been
256 # Write the new docket after the new data file has been
257 # written. Because `st` was opened with `atomictemp=True`,
257 # written. Because `st` was opened with `atomictemp=True`,
258 # the actual `.hg/dirstate` file is only affected on close.
258 # the actual `.hg/dirstate` file is only affected on close.
259 st.write(new_docket.serialize())
259 st.write(new_docket.serialize())
260 st.close()
260 st.close()
261 # Remove the old data file after the new docket pointing to
261 # Remove the old data file after the new docket pointing to
262 # the new data file was written.
262 # the new data file was written.
263 if old_docket.uuid:
263 if old_docket.uuid:
264 data_filename = old_docket.data_filename()
264 data_filename = old_docket.data_filename()
265 unlink = lambda _tr=None: self._opener.unlink(data_filename)
265 unlink = lambda _tr=None: self._opener.unlink(data_filename)
266 if tr:
266 if tr:
267 category = b"dirstate-v2-clean-" + old_docket.uuid
267 category = b"dirstate-v2-clean-" + old_docket.uuid
268 tr.addpostclose(category, unlink)
268 tr.addpostclose(category, unlink)
269 else:
269 else:
270 unlink()
270 unlink()
271 self._docket = new_docket
271 self._docket = new_docket
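# Summary sketch of the write ordering implemented above (no new behavior
# implied): 1) write the packed tree to a fresh data file derived from a
# new uuid, 2) atomically replace the docket via the atomictemp `st` so it
# points at that file, 3) only then unlink the old data file, deferring to
# transaction close when a transaction is active.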
272
272
273 ### reading/setting parents
273 ### reading/setting parents
274
274
275 def parents(self):
275 def parents(self):
276 if not self._parents:
276 if not self._parents:
277 if self._use_dirstate_v2:
277 if self._use_dirstate_v2:
278 self._parents = self.docket.parents
278 self._parents = self.docket.parents
279 else:
279 else:
280 read_len = self._nodelen * 2
280 read_len = self._nodelen * 2
281 st = self._readdirstatefile(read_len)
281 st = self._readdirstatefile(read_len)
282 l = len(st)
282 l = len(st)
283 if l == read_len:
283 if l == read_len:
284 self._parents = (
284 self._parents = (
285 st[: self._nodelen],
285 st[: self._nodelen],
286 st[self._nodelen : 2 * self._nodelen],
286 st[self._nodelen : 2 * self._nodelen],
287 )
287 )
288 elif l == 0:
288 elif l == 0:
289 self._parents = (
289 self._parents = (
290 self._nodeconstants.nullid,
290 self._nodeconstants.nullid,
291 self._nodeconstants.nullid,
291 self._nodeconstants.nullid,
292 )
292 )
293 else:
293 else:
294 raise error.Abort(
294 raise error.Abort(
295 _(b'working directory state appears damaged!')
295 _(b'working directory state appears damaged!')
296 )
296 )
297
297
298 return self._parents
298 return self._parents
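# Layout relied on by the v1 branch above: the first 2 * 20 bytes of
# `.hg/dirstate` are the two parent nodeids. Hypothetical direct read:
#
#   with open('.hg/dirstate', 'rb') as fp:
#       header = fp.read(40)
#   p1, p2 = header[:20], header[20:40]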
299
299
300
300
301 class dirstatemap(_dirstatemapcommon):
301 class dirstatemap(_dirstatemapcommon):
302 """Map encapsulating the dirstate's contents.
302 """Map encapsulating the dirstate's contents.
303
303
304 The dirstate contains the following state:
304 The dirstate contains the following state:
305
305
306 - `identity` is the identity of the dirstate file, which can be used to
306 - `identity` is the identity of the dirstate file, which can be used to
307 detect when changes have occurred to the dirstate file.
307 detect when changes have occurred to the dirstate file.
308
308
309 - `parents` is a pair containing the parents of the working copy. The
309 - `parents` is a pair containing the parents of the working copy. The
310 parents are updated by calling `setparents`.
310 parents are updated by calling `setparents`.
311
311
312 - the state map maps filenames to tuples of (state, mode, size, mtime),
312 - the state map maps filenames to tuples of (state, mode, size, mtime),
313 where state is a single character representing 'normal', 'added',
313 where state is a single character representing 'normal', 'added',
314 'removed', or 'merged'. It is read by treating the dirstate as a
314 'removed', or 'merged'. It is read by treating the dirstate as a
315 dict. File state is updated by calling various methods (see each
315 dict. File state is updated by calling various methods (see each
316 documentation for details):
316 documentation for details):
317
317
318 - `reset_state`,
318 - `reset_state`,
319 - `set_tracked`
319 - `set_tracked`
320 - `set_untracked`
320 - `set_untracked`
321 - `set_clean`
321 - `set_clean`
322 - `set_possibly_dirty`
322 - `set_possibly_dirty`
323
323
324 - `copymap` maps destination filenames to their source filename.
324 - `copymap` maps destination filenames to their source filename.
325
325
326 The dirstate also provides the following views onto the state:
326 The dirstate also provides the following views onto the state:
327
327
328 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
328 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
329 form that they appear as in the dirstate.
329 form that they appear as in the dirstate.
330
330
331 - `dirfoldmap` is a dict mapping normalized directory names to the
331 - `dirfoldmap` is a dict mapping normalized directory names to the
332 denormalized form that they appear as in the dirstate.
332 denormalized form that they appear as in the dirstate.
333 """
333 """
334
334
335 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
336 super(dirstatemap, self).__init__(
337 ui, opener, root, nodeconstants, use_dirstate_v2
338 )
339 if self._use_dirstate_v2:
340 msg = "Dirstate V2 not supportedi"
341 msg += "(should have detected unsupported requirement)"
342 raise error.ProgrammingError(msg)
343
344 ### Core data storage and access
335 ### Core data storage and access
345
336
346 @propertycache
337 @propertycache
347 def _map(self):
338 def _map(self):
348 self._map = {}
339 self._map = {}
349 self.read()
340 self.read()
350 return self._map
341 return self._map
351
342
352 @propertycache
343 @propertycache
353 def copymap(self):
344 def copymap(self):
354 self.copymap = {}
345 self.copymap = {}
355 self._map
346 self._map
356 return self.copymap
347 return self.copymap
357
348
358 def clear(self):
349 def clear(self):
359 self._map.clear()
350 self._map.clear()
360 self.copymap.clear()
351 self.copymap.clear()
361 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
352 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
362 util.clearcachedproperty(self, b"_dirs")
353 util.clearcachedproperty(self, b"_dirs")
363 util.clearcachedproperty(self, b"_alldirs")
354 util.clearcachedproperty(self, b"_alldirs")
364 util.clearcachedproperty(self, b"filefoldmap")
355 util.clearcachedproperty(self, b"filefoldmap")
365 util.clearcachedproperty(self, b"dirfoldmap")
356 util.clearcachedproperty(self, b"dirfoldmap")
366
357
367 def items(self):
358 def items(self):
368 return pycompat.iteritems(self._map)
359 return pycompat.iteritems(self._map)
369
360
370 # forward for python2,3 compat
361 # forward for python2,3 compat
371 iteritems = items
362 iteritems = items
372
363
373 def debug_iter(self, all):
364 def debug_iter(self, all):
374 """
365 """
375 Return an iterator of (filename, state, mode, size, mtime) tuples
366 Return an iterator of (filename, state, mode, size, mtime) tuples
376
367
377 `all` is unused when Rust is not enabled
368 `all` is unused when Rust is not enabled
378 """
369 """
379 for (filename, item) in self.items():
370 for (filename, item) in self.items():
380 yield (filename, item.state, item.mode, item.size, item.mtime)
371 yield (filename, item.state, item.mode, item.size, item.mtime)
381
372
382 def keys(self):
373 def keys(self):
383 return self._map.keys()
374 return self._map.keys()
384
375
385 ### reading/setting parents
376 ### reading/setting parents
386
377
387 def setparents(self, p1, p2, fold_p2=False):
378 def setparents(self, p1, p2, fold_p2=False):
388 self._parents = (p1, p2)
379 self._parents = (p1, p2)
389 self._dirtyparents = True
380 self._dirtyparents = True
390 copies = {}
381 copies = {}
391 if fold_p2:
382 if fold_p2:
392 for f, s in pycompat.iteritems(self._map):
383 for f, s in pycompat.iteritems(self._map):
393 # Discard "merged" markers when moving away from a merge state
384 # Discard "merged" markers when moving away from a merge state
394 if s.p2_info:
385 if s.p2_info:
395 source = self.copymap.pop(f, None)
386 source = self.copymap.pop(f, None)
396 if source:
387 if source:
397 copies[f] = source
388 copies[f] = source
398 s.drop_merge_data()
389 s.drop_merge_data()
399 return copies
390 return copies
400
391
401 ### disk interaction
392 ### disk interaction
402
393
403 def read(self):
394 def read(self):
404 # ignore HG_PENDING because identity is used only for writing
395 # ignore HG_PENDING because identity is used only for writing
405 self.identity = util.filestat.frompath(
396 self.identity = util.filestat.frompath(
406 self._opener.join(self._filename)
397 self._opener.join(self._filename)
407 )
398 )
408
399
409 try:
400 if self._use_dirstate_v2:
410 fp = self._opendirstatefile()
401 if not self.docket.uuid:
411 try:
402 return
412 st = fp.read()
403 st = self._opener.read(self.docket.data_filename())
413 finally:
404 else:
414 fp.close()
405 st = self._readdirstatefile()
415 except IOError as err:
406
416 if err.errno != errno.ENOENT:
417 raise
418 return
419 if not st:
407 if not st:
420 return
408 return
421
409
410 # TODO: adjust this estimate for dirstate-v2
422 if util.safehasattr(parsers, b'dict_new_presized'):
411 if util.safehasattr(parsers, b'dict_new_presized'):
            # Make an estimate of the number of files in the dirstate based on
            # its size. This trades wasting some memory for avoiding costly
            # resizes. Each entry has a prefix of 17 bytes followed by one or
            # two path names. Studies on various large-scale real-world
            # repositories found 54 bytes a reasonable upper limit for the
            # average path name. Copy entries are ignored for the sake of this
            # estimate.
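            # That is, roughly 17 + 54 = 71 bytes per entry, which is where
            # the divisor on the next line comes from.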
            self._map = parsers.dict_new_presized(len(st) // 71)

        # Python's garbage collector triggers a GC each time a certain number
        # of container objects (the number being defined by
        # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
        # for each file in the dirstate. The C version then immediately marks
        # them as not to be tracked by the collector. However, this has no
        # effect on when GCs are triggered, only on what objects the GC looks
        # into. This means that O(number of files) GCs are unavoidable.
        # Depending on when in the process's lifetime the dirstate is parsed,
        # this can get very expensive. As a workaround, disable GC while
        # parsing the dirstate.
        #
        # (we cannot decorate the function directly since it is in a C module)
-        parse_dirstate = util.nogc(parsers.parse_dirstate)
-        p = parse_dirstate(self._map, self.copymap, st)
+        if self._use_dirstate_v2:
+            p = self.docket.parents
+            meta = self.docket.tree_metadata
+            parse_dirstate = util.nogc(v2.parse_dirstate)
+            parse_dirstate(self._map, self.copymap, st, meta)
+        else:
+            parse_dirstate = util.nogc(parsers.parse_dirstate)
+            p = parse_dirstate(self._map, self.copymap, st)
        if not self._dirtyparents:
            self.setparents(*p)

        # Avoid excess attribute lookups by fast pathing certain checks
        self.__contains__ = self._map.__contains__
        self.__getitem__ = self._map.__getitem__
        self.get = self._map.get

-    def write(self, _tr, st, now):
-        d = parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
-        st.write(d)
-        st.close()
+    def write(self, tr, st, now):
+        if self._use_dirstate_v2:
+            packed, meta = v2.pack_dirstate(self._map, self.copymap, now)
+            self.write_v2_no_append(tr, st, meta, packed)
+        else:
+            packed = parsers.pack_dirstate(
+                self._map, self.copymap, self.parents(), now
+            )
+            st.write(packed)
+            st.close()
        self._dirtyparents = False

    @propertycache
    def identity(self):
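        # Touching self._map triggers read(), which stores a fresh
        # util.filestat as a plain instance attribute named ``identity``;
        # that attribute shadows this propertycache, so the value returned
        # below is the one captured by the read.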
        self._map
        return self.identity

    ### code related to maintaining and accessing "extra" property
    # (e.g. "has_dir")

    def _dirs_incr(self, filename, old_entry=None):
        """increment the dirstate counter if applicable"""
        if (
            old_entry is None or old_entry.removed
        ) and "_dirs" in self.__dict__:
            self._dirs.addpath(filename)
        if old_entry is None and "_alldirs" in self.__dict__:
            self._alldirs.addpath(filename)

    def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
        """decrement the dirstate counter if applicable"""
        if old_entry is not None:
            if "_dirs" in self.__dict__ and not old_entry.removed:
                self._dirs.delpath(filename)
            if "_alldirs" in self.__dict__ and not remove_variant:
                self._alldirs.delpath(filename)
        elif remove_variant and "_alldirs" in self.__dict__:
            self._alldirs.addpath(filename)
        if "filefoldmap" in self.__dict__:
            normed = util.normcase(filename)
            self.filefoldmap.pop(normed, None)

    @propertycache
    def filefoldmap(self):
        """Returns a dictionary mapping normalized case paths to their
        non-normalized versions.
        """
        try:
            makefilefoldmap = parsers.make_file_foldmap
        except AttributeError:
            pass
        else:
            return makefilefoldmap(
                self._map, util.normcasespec, util.normcasefallback
            )

        f = {}
        normcase = util.normcase
        for name, s in pycompat.iteritems(self._map):
            if not s.removed:
                f[normcase(name)] = name
        f[b'.'] = b'.'  # prevents useless util.fspath() invocation
        return f

    @propertycache
    def dirfoldmap(self):
        f = {}
        normcase = util.normcase
        for name in self._dirs:
            f[normcase(name)] = name
        return f


    def hastrackeddir(self, d):
        """
        Returns True if the dirstate contains a tracked (not removed) file
        in this directory.
        """
        return d in self._dirs

    def hasdir(self, d):
        """
        Returns True if the dirstate contains a file (tracked or removed)
        in this directory.
        """
        return d in self._alldirs

    @propertycache
    def _dirs(self):
        return pathutil.dirs(self._map, only_tracked=True)

    @propertycache
    def _alldirs(self):
        return pathutil.dirs(self._map)


    ### code related to manipulation of entries and copy-sources

    def _refresh_entry(self, f, entry):
        if not entry.any_tracked:
            self._map.pop(f, None)

    def _insert_entry(self, f, entry):
        self._map[f] = entry

    def _drop_entry(self, f):
        self._map.pop(f, None)
        self.copymap.pop(f, None)


if rustmod is not None:

    class dirstatemap(_dirstatemapcommon):

        ### Core data storage and access

        @propertycache
        def _map(self):
            """
            Fills the Dirstatemap when called.
            """
            # ignore HG_PENDING because identity is used only for writing
            self.identity = util.filestat.frompath(
                self._opener.join(self._filename)
            )

            if self._use_dirstate_v2:
                if self.docket.uuid:
                    # TODO: use mmap when possible
                    data = self._opener.read(self.docket.data_filename())
                else:
                    data = b''
                self._map = rustmod.DirstateMap.new_v2(
                    data, self.docket.data_size, self.docket.tree_metadata
                )
                parents = self.docket.parents
            else:
                self._map, parents = rustmod.DirstateMap.new_v1(
                    self._readdirstatefile()
                )

            if parents and not self._dirtyparents:
                self.setparents(*parents)

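            # Avoid excess attribute lookups by fast-pathing certain checks,
            # mirroring the pure-Python map above.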
            self.__contains__ = self._map.__contains__
            self.__getitem__ = self._map.__getitem__
            self.get = self._map.get
            return self._map

        @property
        def copymap(self):
            return self._map.copymap()

        def debug_iter(self, all):
            """
            Return an iterator of (filename, state, mode, size, mtime) tuples

            `all`: also include dirstate tree nodes (with `state == b' '`)
            that don't have an associated `DirstateItem`.

            """
            return self._map.debug_iter(all)

        def clear(self):
            self._map.clear()
            self.setparents(
                self._nodeconstants.nullid, self._nodeconstants.nullid
            )
            util.clearcachedproperty(self, b"_dirs")
            util.clearcachedproperty(self, b"_alldirs")
            util.clearcachedproperty(self, b"dirfoldmap")

        def items(self):
            return self._map.items()

        # forward for python2,3 compat
        iteritems = items

        def keys(self):
            return iter(self._map)

        ### reading/setting parents

        def setparents(self, p1, p2, fold_p2=False):
            self._parents = (p1, p2)
            self._dirtyparents = True
            copies = {}
            if fold_p2:
                # Collect into an intermediate list to avoid a `RuntimeError`
                # exception due to mutation during iteration.
                # TODO: move this whole loop to Rust, where `iter_mut`
                # enables in-place mutation of elements of a collection while
                # iterating it, without mutating the collection itself.
                files_with_p2_info = [
                    f for f, s in self._map.items() if s.p2_info
                ]
                rust_map = self._map
                for f in files_with_p2_info:
                    e = rust_map.get(f)
                    source = self.copymap.pop(f, None)
                    if source:
                        copies[f] = source
                    e.drop_merge_data()
                    rust_map.set_dirstate_item(f, e)
            return copies

        ### disk interaction

        @propertycache
        def identity(self):
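            # Same lazy-identity trick as in the pure-Python map above:
            # touching self._map captures a fresh util.filestat under the
            # ``identity`` attribute, which shadows this propertycache.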
            self._map
            return self.identity

        def write(self, tr, st, now):
            if not self._use_dirstate_v2:
                p1, p2 = self.parents()
                packed = self._map.write_v1(p1, p2, now)
                st.write(packed)
                st.close()
                self._dirtyparents = False
                return

            # We can only append to an existing data file if there is one
            can_append = self.docket.uuid is not None
            packed, meta, append = self._map.write_v2(now, can_append)
            if append:
                docket = self.docket
                data_filename = docket.data_filename()
                if tr:
                    tr.add(data_filename, docket.data_size)
                with self._opener(data_filename, b'r+b') as fp:
                    fp.seek(docket.data_size)
                    assert fp.tell() == docket.data_size
                    written = fp.write(packed)
                    if written is not None:  # py2 may return None
                        assert written == len(packed), (written, len(packed))
                docket.data_size += len(packed)
                docket.parents = self.parents()
                docket.tree_metadata = meta
                st.write(docket.serialize())
                st.close()
            else:
                self.write_v2_no_append(tr, st, meta, packed)
            # Reload from the newly-written file
            util.clearcachedproperty(self, b"_map")
            self._dirtyparents = False

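        # Note on the append path above: only the newly-packed entries are
        # written to the data file, followed by a rewrite of the small
        # docket, so the cost of a steady-state write tracks the size of the
        # change rather than the size of the whole dirstate.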
        ### code related to maintaining and accessing "extra" property
        # (e.g. "has_dir")

        @propertycache
        def filefoldmap(self):
            """Returns a dictionary mapping normalized case paths to their
            non-normalized versions.
            """
            return self._map.filefoldmapasdict()

        def hastrackeddir(self, d):
            return self._map.hastrackeddir(d)

        def hasdir(self, d):
            return self._map.hasdir(d)

        @propertycache
        def dirfoldmap(self):
            f = {}
            normcase = util.normcase
            for name in self._map.tracked_dirs():
                f[normcase(name)] = name
            return f

        ### code related to manipulation of entries and copy-sources

        def _refresh_entry(self, f, entry):
            if not entry.any_tracked:
                self._map.drop_item_and_copy_source(f)
            else:
                self._map.addfile(f, entry)

        def _insert_entry(self, f, entry):
            self._map.addfile(f, entry)

        def _drop_entry(self, f):
            self._map.drop_item_and_copy_source(f)

        def __setitem__(self, key, value):
            assert isinstance(value, DirstateItem)
            self._map.set_dirstate_item(key, value)
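
# A minimal, self-contained sketch (not part of this changeset) of the
# append-or-rewrite pattern that the v2 ``write()`` above relies on.
# ``SketchDocket``, ``save`` and the file paths are hypothetical stand-ins
# for the real docket machinery; only the shape of the logic is meant to
# match.


class SketchDocket(object):
    def __init__(self):
        self.uuid = None  # None until a data file exists
        self.data_size = 0  # bytes of the data file currently in use


def save(docket, packed, data_path, docket_path):
    """Append ``packed`` if a data file already exists, else write one
    from scratch, then rewrite the small docket pointing at the data."""
    if docket.uuid is not None:
        # Append-only fast path: new entries land after the used prefix.
        with open(data_path, 'r+b') as fp:
            fp.seek(docket.data_size)
            fp.write(packed)
    else:
        # No data file yet: create one and claim it in the docket.
        docket.uuid = 'sketch-uuid'
        docket.data_size = 0
        with open(data_path, 'wb') as fp:
            fp.write(packed)
    docket.data_size += len(packed)
    # The docket itself is tiny and is rewritten in full on every save.
    with open(docket_path, 'wb') as fp:
        fp.write(b'%d\n' % docket.data_size)
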
@@ -1,3881 +1,3871 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import functools
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullrev,
    sha1nodeconstants,
    short,
)
from .pycompat import (
    delattr,
    getattr,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    bundlecaches,
    changegroup,
    color,
    commit,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    mergestate as mergestatemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    rcutil,
    repoview,
    requirements as requirementsmod,
    revlog,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
    wireprototypes,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
    urlutil,
)

from .revlogutils import (
    concurrency_checker as revlogchecker,
    constants as revlogconst,
    sidedata as sidedatamod,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()


class _basefilecache(scmutil.filecache):
    """All filecache usage on a repo is done for logic that should be unfiltered"""

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)


class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)


class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b''))

    def join(self, obj, fname):
        return obj.sjoin(fname)


class changelogcache(storecache):
    """filecache for the changelog"""

    def __init__(self):
        super(changelogcache, self).__init__()
        _cachedfiles.add((b'00changelog.i', b''))
        _cachedfiles.add((b'00changelog.n', b''))

    def tracked_paths(self, obj):
        paths = [self.join(obj, b'00changelog.i')]
        if obj.store.opener.options.get(b'persistent-nodemap', False):
            paths.append(self.join(obj, b'00changelog.n'))
        return paths


class manifestlogcache(storecache):
    """filecache for the manifestlog"""

    def __init__(self):
        super(manifestlogcache, self).__init__()
        _cachedfiles.add((b'00manifest.i', b''))
        _cachedfiles.add((b'00manifest.n', b''))

    def tracked_paths(self, obj):
        paths = [self.join(obj, b'00manifest.i')]
        if obj.store.opener.options.get(b'persistent-nodemap', False):
            paths.append(self.join(obj, b'00manifest.n'))
        return paths


class mixedrepostorecache(_basefilecache):
178 """filecache for a mix files in .hg/store and outside"""
178 """filecache for a mix files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)


def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True
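# For instance, ``isfilecached(repo, b'dirstate')`` reports whether the
# ``dirstate`` filecache-ed property has been populated, without forcing
# its computation (property name chosen here only as an illustration).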


class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""

    @functools.wraps(orig)
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper


moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})


@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True


@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered(b'served')
        self.ui = repo.ui

        if repo._wanted_sidedata:
            formatted = bundle2.format_remote_wanted_sidedata(repo)
            caps.add(b'exp-wanted-sidedata=' + formatted)

        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self,
        source,
        heads=None,
        common=None,
        bundlecaps=None,
        remote_sidedata=None,
        **kwargs
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            remote_sidedata=remote_sidedata,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.


@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    """peer extension which implements legacy methods too; used for tests with
    restricted capabilities"""

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.


# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

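# A hypothetical sketch (extension code, not part of this file) of how the
# hook above is meant to be used; the requirement name is made up:
#
#     from mercurial import localrepo
#
#     def featuresetup(ui, features):
#         # declare that we can open repos carrying this requirement
#         features.add(b'exp-sketch-requirement')
#
#     localrepo.featuresetupfuncs.add(featuresetup)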

def _getsharedvfs(hgvfs, requirements):
    """returns the vfs object pointing to the root of the shared source
    repo for a shared repository

    hgvfs is a vfs pointing at .hg/ of the current repo (the shared one)
    requirements is a set of requirements of the current repo (the shared one)
    """
    # The ``shared`` or ``relshared`` requirements indicate the
    # store lives in the path contained in the ``.hg/sharedpath`` file.
    # This is an absolute path for ``shared`` and relative to
    # ``.hg/`` for ``relshared``.
    sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
    if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
        sharedpath = util.normpath(hgvfs.join(sharedpath))

    sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

    if not sharedvfs.exists():
        raise error.RepoError(
            _(b'.hg/sharedpath points to nonexistent directory %s')
            % sharedvfs.base
        )
    return sharedvfs


def _readrequires(vfs, allowmissing):
    """reads the requires file present at the root of this vfs
    and returns a set of requirements

    If allowmissing is True, we suppress ENOENT if raised"""
    # requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(vfs.read(b'requires').splitlines())
    except IOError as e:
        if not (allowmissing and e.errno == errno.ENOENT):
            raise
        requirements = set()
    return requirements


def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
    # Whether this repository is a shared one or not
    shared = False
    # If this repository is shared, a vfs pointing to the shared repo
    sharedvfs = None

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        except ValueError as e:
            # Can be raised on Python 3.8 when path is invalid.
            raise error.Abort(
                _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
            )

        raise error.RepoError(_(b'repository %s not found') % path)

    requirements = _readrequires(hgvfs, True)
    shared = (
        requirementsmod.SHARED_REQUIREMENT in requirements
        or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
    )
    storevfs = None
    if shared:
        # This is a shared repo
        sharedvfs = _getsharedvfs(hgvfs, requirements)
        storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
    else:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'))

    # if .hg/requires contains the sharesafe requirement, it means
    # there exists a `.hg/store/requires` too and we should read it
    # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store
    # requirement is present. We never write SHARESAFE_REQUIREMENT for a repo
    # if the store is not present; refer to checkrequirementscompat() for that
    #
    # However, if SHARESAFE_REQUIREMENT is not present, it means that the
    # repository was shared the old way. We check the share source .hg/requires
    # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
    # to be reshared
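    # For example, a user could put the following in an hgrc to silently
    # downgrade such a share on access (the values recognized below are
    # ``downgrade-allow``, ``allow``, ``downgrade-abort`` and ``abort``):
    #
    #     [share]
    #     safe-mismatch.source-not-safe = downgrade-allow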
619 hint = _(b"see `hg help config.format.use-share-safe` for more information")
619 hint = _(b"see `hg help config.format.use-share-safe` for more information")
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:

        if (
            shared
            and requirementsmod.SHARESAFE_REQUIREMENT
            not in _readrequires(sharedvfs, True)
        ):
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-not-safe.warn'
            )
            mismatch_config = ui.config(
                b'share', b'safe-mismatch.source-not-safe'
            )
            if mismatch_config in (
                b'downgrade-allow',
                b'allow',
                b'downgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.downgrade_share_to_non_safe(
                    ui,
                    hgvfs,
                    sharedvfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(b"share source does not support share-safe requirement"),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-not-safe`"
                        b" set."
                    )
                    % mismatch_config,
                    hint=hint,
                )
        else:
            requirements |= _readrequires(storevfs, False)
    elif shared:
        sourcerequires = _readrequires(sharedvfs, False)
        if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
            mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-safe.warn'
            )
            if mismatch_config in (
                b'upgrade-allow',
                b'allow',
                b'upgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.upgrade_share_to_safe(
                    ui,
                    hgvfs,
                    storevfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(
                        b'version mismatch: source uses share-safe'
                        b' functionality while the current share does not'
                    ),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-safe` set."
                    )
                    % mismatch_config,
                    hint=hint,
                )

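    # A sketch of how a user would opt into the automatic upgrade/downgrade
    # paths above, using the config options this function checks (values are
    # taken from the branches above):
    #
    #   [share]
    #   safe-mismatch.source-not-safe = downgrade-allow
    #   safe-mismatch.source-safe = upgrade-allow
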
    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. If the `shared` or
    # `relshared` requirement is present, the current repository is a share
    # and the store exists in the path mentioned in `.hg/sharedpath`.
    if shared:
        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
        features.add(repository.REPO_FEATURE_SHARED_STORAGE)
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    if (
        requirementsmod.REVLOGV2_REQUIREMENT in requirements
        or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
    ):
        features.add(repository.REPO_FEATURE_SIDE_DATA)
        # the revlogv2 docket introduced a race condition that we need to fix
        features.discard(repository.REPO_FEATURE_STREAM_CLONE)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The wcache vfs is used to manage cache files related to the working copy.
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})
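    # For illustration, a repository at /src/repo would get a type name
    # along these lines (the exact requirement set varies per repository):
    #
    #   derivedrepo:/src/repo<dotencode,fncache,generaldelta,revlogv1,store>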

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )


def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.

    sharedvfs is a vfs object pointing to the source repo if the current
    one is a shared one
    """
    if not rcutil.use_repo_hgrc():
        return False

    ret = False
    # first load config from the shared source if we have to
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
        try:
            ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
            ret = True
        except IOError:
            pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    return ret


def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to lists of extensions to load automatically when
    # the requirement is present.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }
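    # e.g. a repository whose requirements include ``lfs`` gets
    # ``extensions.lfs=`` set below, as if the user had enabled it in hgrc.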

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source=b'autoload')


def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')
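    # e.g. with the zstd engine available, both b'exp-compression-zstd' and
    # b'revlog-compression-zstd' end up in the supported set.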

    return supported


def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.

    Returns ``None`` when every requirement is recognized.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )


def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if (
        requirementsmod.SPARSE_REQUIREMENT in requirements
        and not sparse.enabled
    ):
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extension to access'
            )
        )


def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if requirementsmod.STORE_REQUIREMENT in requirements:
        if requirementsmod.FNCACHE_REQUIREMENT in requirements:
            dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
            return storemod.fncachestore(path, vfstype, dotencode)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)


def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if (
        requirementsmod.REVLOGV1_REQUIREMENT in requirements
        or requirementsmod.REVLOGV2_REQUIREMENT in requirements
    ):
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options


def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
        options[b'revlogv1'] = True
    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True
    if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
        options[b'changelogv2'] = True

    if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents

    issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
    options[b'issue6528.fix-incoming'] = issue6528

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # we allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one"
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]
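    # e.g. the requirement b'revlog-compression-zstd' splits to
    # [b'revlog', b'compression', b'zstd'], selecting b'zstd' here.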

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if requirementsmod.NODEMAP_REQUIREMENT in requirements:
        slow_path = ui.config(
            b'storage', b'revlog.persistent-nodemap.slow-path'
        )
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(
                b'storage', b'revlog.persistent-nodemap.slow-path'
            )
            msg = _(
                b'unknown value for config '
                b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
            )
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `persistent-nodemap` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-persistent-nodemap` "
            b"for details"
        )
        if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
        options[b'persistent-nodemap'] = True
        if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
            options[b'persistent-nodemap.mmap'] = True
        if ui.configbool(b'devel', b'persistent-nodemap'):
            options[b'devel-force-nodemap'] = True

    return options


def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
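# An extension could therefore contribute an extra base class by appending
# its own entry; a hypothetical sketch (``imyfeature`` and ``makemyfeature``
# are invented names):
#
#   localrepo.REPO_INTERFACES.append(
#       (myintf.imyfeature, lambda: myext.makemyfeature)
#   )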


@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        requirementsmod.REVLOGV1_REQUIREMENT,
        requirementsmod.GENERALDELTA_REQUIREMENT,
        requirementsmod.TREEMANIFEST_REQUIREMENT,
        requirementsmod.COPIESSDC_REQUIREMENT,
        requirementsmod.REVLOGV2_REQUIREMENT,
        requirementsmod.CHANGELOGV2_REQUIREMENT,
        requirementsmod.SPARSEREVLOG_REQUIREMENT,
        requirementsmod.NODEMAP_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
        requirementsmod.SHARESAFE_REQUIREMENT,
        requirementsmod.DIRSTATE_V2_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        requirementsmod.STORE_REQUIREMENT,
        requirementsmod.FNCACHE_REQUIREMENT,
        requirementsmod.SHARED_REQUIREMENT,
        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
        requirementsmod.DOTENCODE_REQUIREMENT,
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` defining the path to the storage base directory. Points
           to a ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

        self._wanted_sidedata = set()
        self._sidedata_computers = {}
        sidedatamod.set_sidedata_spec_for_repo(self)

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (
                b'journal.',
                b'undo.',
                b'strip-backup/',
                b'cache/',
            )
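            # e.g. an unlocked write to b'journal.dirstate' or to anything
            # under b'strip-backup/' would trigger the develwarn below.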
1483 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1481 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1484 if repo._currentlock(repo._lockref) is None:
1482 if repo._currentlock(repo._lockref) is None:
1485 repo.ui.develwarn(
1483 repo.ui.develwarn(
1486 b'write with no lock: "%s"' % path,
1484 b'write with no lock: "%s"' % path,
1487 stacklevel=3,
1485 stacklevel=3,
1488 config=b'check-locks',
1486 config=b'check-locks',
1489 )
1487 )
1490 elif repo._currentlock(repo._wlockref) is None:
1488 elif repo._currentlock(repo._wlockref) is None:
1491 # rest of vfs files are covered by 'wlock'
1489 # rest of vfs files are covered by 'wlock'
1492 #
1490 #
1493 # exclude special files
1491 # exclude special files
1494 for prefix in self._wlockfreeprefix:
1492 for prefix in self._wlockfreeprefix:
1495 if path.startswith(prefix):
1493 if path.startswith(prefix):
1496 return
1494 return
1497 repo.ui.develwarn(
1495 repo.ui.develwarn(
1498 b'write with no wlock: "%s"' % path,
1496 b'write with no wlock: "%s"' % path,
1499 stacklevel=3,
1497 stacklevel=3,
1500 config=b'check-locks',
1498 config=b'check-locks',
1501 )
1499 )
1502 return ret
1500 return ret
1503
1501
1504 return checkvfs
1502 return checkvfs
1505
1503
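# Editor's illustration (not part of this module): a minimal, standalone
# sketch of the ward pattern used by _getvfsward above. The wrapper holds only
# a weakref to the repo so it cannot keep the repo alive, and it checks lock
# state after delegating to the wrapped opener. All names below (FakeRepo,
# make_ward) are hypothetical.

import weakref


class FakeRepo(object):
    locked = False  # stand-in for repo._currentlock(...) returning a lock


def make_ward(repo, origfunc):
    rref = weakref.ref(repo)  # avoid a repo <-> wrapper reference cycle

    def checked(path, mode=None):
        ret = origfunc(path, mode=mode)
        repo = rref()
        if repo is None or mode in (None, 'r', 'rb'):
            return ret  # repo collected, or read-only access: nothing to check
        if not repo.locked:
            print('write with no lock: "%s"' % path)  # stand-in for develwarn
        return ret

    return checked


# usage sketch:
#     opener = make_ward(FakeRepo(), lambda p, mode=None: open(p, mode or 'r'))
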
    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        if self.ui.configbool(b'experimental', b'narrow'):
            caps.add(wireprototypes.NARROWCAP)
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected it by mistake since
        # it panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

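# Editor's illustration (not part of this module): the single level of
# repoview filtering described above. Chaining filtered() calls does not
# stack filters; the last requested view wins. `repo` is assumed to be any
# loaded localrepository.
#
#     served = repo.filtered(b'served')      # hides e.g. secret changesets
#     visible = served.filtered(b'visible')  # a plain 'visible' view, not
#                                            # "served then visible"
#     assert visible.filtername.startswith(b'visible')
#
# When an extra filter id is configured, the effective view name becomes e.g.
# b'served%<extrafilterid>', which is why the code above only appends the id
# when b'%' is not already in the name.
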
    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside the transaction, the changelog is updated to content B
        # 3) outside the transaction, the bookmark file is updated to refer to
        #    content B
        # 4) the bookmarks file content is read and filtered against
        #    changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are
        # dropped.
        #
        # Having this happen during a read is not great, but it becomes worse
        # when it happens during a write, because the bookmarks pointing to
        # the "unknown" nodes are then dropped for good. However, writes
        # happen within locks. This locking makes it possible to have a
        # race-free, consistent read. For this purpose, data read from disk
        # before locking is "invalidated" right after the locks are taken.
        # This invalidation is "light": the `filecache` mechanism keeps the
        # data in memory and reuses it if the underlying files did not
        # change. Not parsing the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by
        # the bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would
        # be refreshed but `_bookmarks` would be preserved. Adding
        # `00changelog.i` to the list of tracked files is not enough, because
        # at the time we build the content for `_bookmarks` in (4), the
        # changelog file has already diverged from the content used for
        # loading `changelog` in (1).
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still
        # happen without the lock (with a narrower window), but it would no
        # longer go undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) we force the `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as
        # the cachestat computed in (1). As a result, at locking time:
        #  * if the changelog did not change since (1) -> we can reuse the
        #    data
        #  * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

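# Editor's illustration (not part of this module): the refresh-before-compute
# schedule above, in miniature. The derived value (bookmarks) is only computed
# after its dependency (the changelog) has been dropped and re-read, so it can
# never be filtered against stale data. MiniRepo and its dict layout are
# hypothetical.


class MiniRepo(object):
    def __init__(self, disk):
        # disk: {'changelog': set of nodes, 'bookmarks': {name: node}}
        self.disk = disk
        self._changelog = None

    def _refreshchangelog(self):
        # step (3): drop the cached dependency so it is re-read from "disk"
        self._changelog = None

    def bookmarks(self):
        # steps (3)-(5): refresh the changelog, then filter the bookmarks
        self._refreshchangelog()
        self._changelog = set(self.disk['changelog'])
        return {
            name: node
            for name, node in self.disk['bookmarks'].items()
            if node in self._changelog
        }


repo = MiniRepo({'changelog': {'n1'}, 'bookmarks': {'stable': 'n1', 'gone': 'n2'}})
assert repo.bookmarks() == {'stable': 'n1'}  # 'gone' points to an unknown node
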
    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but that
    # can't be easily expressed in the filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @changelogcache()
    def changelog(repo):
        # load dirstate before changelog to avoid a race; see issue6303
        repo.dirstate.prefetch_parents()
        return repo.store.changelog(
            txnutil.mayhavepending(repo.root),
            concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
        )

    @manifestlogcache()
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)
        v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
        use_dirstate_v2 = v2_req in self.requirements

        return dirstate.dirstate(
            self.vfs,
            self.ui,
            self.root,
            self._dirstatevalidate,
            sparsematchfn,
            self.nodeconstants,
            use_dirstate_v2,
        )

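# Editor's illustration (not part of this module): the dirstate-v2 gating in
# miniature. Whether the v2 on-disk format is used is decided purely by the
# repository requirements, so clients that lack v2 support refuse such repos
# instead of misreading them. The literal below is a hypothetical stand-in
# for requirementsmod.DIRSTATE_V2_REQUIREMENT.

DIRSTATE_V2 = b'dirstate-v2'  # hypothetical literal; see requirementsmod


def wants_dirstate_v2(requirements):
    # True when the repo was created or upgraded with the v2 format
    return DIRSTATE_V2 in requirements


assert wants_dirstate_v2({b'store', DIRSTATE_V2})
assert not wants_dirstate_v2({b'store'})
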
    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return self.nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned about later
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

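# Editor's illustration (not part of this module): the matcher algebra above,
# modeled with plain predicates. With narrow matcher N, caller matcher M and
# the exact matcher E over M's named files:
#   includeexact=False -> M and N;  includeexact=True -> M and (N or E)

narrow = lambda f: f.startswith(b'src/')  # N
caller = lambda f: f.endswith(b'.py')     # M
exact_files = {b'docs/conf.py'}           # files named explicitly by the caller
exact = lambda f: f in exact_files        # E

combined = lambda f: caller(f) and (narrow(f) or exact(f))

assert combined(b'src/a.py')      # inside the narrowspec
assert combined(b'docs/conf.py')  # outside, but explicitly named
assert not combined(b'docs/b.py')
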
    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, self.nodeconstants.nullid),
            nullrev: (nullrev, self.nodeconstants.nullid),
            self.nullid: (nullrev, self.nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast-path access to the working copy parents
        # however, only do it for filters that ensure the wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == self.nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != self.nullid:
            quick[b'.'] = quick[p1node]
        return quick

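# Editor's illustration (not part of this module): the shape of the
# quick-access table built above. Every entry, whether keyed by rev, node or
# b'.', maps to the same (rev, node) pair, so common lookups resolve with a
# single dict.get(). The values below are made up.

p1_rev, p1_node = 41, b'\xaa' * 20
quick = {
    b'null': (-1, b'\x00' * 20),
    -1: (-1, b'\x00' * 20),
    p1_rev: (p1_rev, p1_node),
    p1_node: (p1_rev, p1_node),
    b'.': (p1_rev, p1_node),
}
assert quick[b'.'] == quick[p1_rev] == quick[p1_node]
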
    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains the symbols we can recognize right away without
        further processing.
        """
        if self.filtername in repoview.filter_has_wc:
            return self._quick_access_changeid_wc
        return self._quick_access_changeid_null

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == self.nodeconstants.nodelen:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a
                    # recognizable exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 2 * self.nodeconstants.nodelen:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)

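# Editor's illustration (not part of this module): the kinds of changeid
# __getitem__ accepts, assuming `repo` is a loaded repository:
#
#     repo[None]          # workingctx for the working directory
#     repo[0]             # changectx by integer revision
#     repo[b'.']          # first parent of the working directory
#     repo[b'tip']        # tipmost changeset
#     repo[node]          # 20-byte binary node
#     repo[b'abc123...']  # 40-char hex node (2 * nodelen)
#     repo[0:3]           # slice -> list of changectx, filtered revs skipped
#
# Anything else raises ProgrammingError; unknown-but-well-formed ids raise
# RepoLookupError (or FilteredRepoLookupError for hidden revisions).
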
    def __contains__(self, changeid):
        """True if the given changeid exists"""
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        """Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        """
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

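# Editor's illustration (not part of this module): typical revs()/set() calls,
# assuming `repo` is a loaded repository and using the %-formatting handled by
# revsetlang.formatspec (%d for an int, %s for a string, %ld for a list of
# ints):
#
#     repo.revs(b'ancestors(%d)', 42)
#     repo.revs(b'%ld and merge()', [5, 7, 11])
#     for ctx in repo.set(b'branch(%s)', b'default'):
#         pass  # set() wraps revs() and yields changectx objects
#
# The returned smartset is lazy; materialize it with list() only when a
# concrete list is really needed.
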
    def set(self, expr, *args):
        """Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        """
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        """Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        """
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        """Returns a tagscache object that contains various tag-related
        caches."""

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in pycompat.iteritems(tags):
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        """Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object."""

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != self.nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value
            for (name, value) in pycompat.iteritems(tagtypes)
        }
        return (tags, tagtypes)

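# Editor's illustration (not part of this module): the shape of the pair
# returned by _findtags(). Keys are tag names in the local encoding; the
# values below are made up.

node_a = b'\x11' * 20
tags = {b'release-1.0': node_a, b'tip': b'\x22' * 20}
tagtypes = {b'release-1.0': b'global'}  # from .hgtags on some head

# b'tip' is synthesized from the changelog, so it has no entry in tagtypes,
# which is why tagtype() below returns None for it:
assert tagtypes.get(b'tip') is None
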
    def tagtype(self, tagname):
        """
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        """

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        """returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number"""
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def register_changeset(self, rev, changelogrevision):
        self.revbranchcache().setdata(rev, changelogrevision)

    def branchtip(self, branch, ignoremissing=False):
        """return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        """
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

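# Editor's illustration (not part of this module): known() maps a list of
# nodes to booleans, preserving order, e.g.
#
#     repo.known([existing_node, unknown_node, filtered_node])
#     -> [True, False, False]
#
# A node counts as unknown both when it is absent from the changelog and when
# its revision is filtered out of the current view.
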
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=None):
        if p2 is None:
            p2 = self.nullid
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

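# Editor's illustration (not part of this module): the (matcher, fn, params)
# triples built by _loadfilter() are tried in order, and the first matching
# pattern wins, mirroring the `break` in _filter() above. All names below are
# hypothetical.

def to_lf(data, cmd, **kwargs):
    return data.replace(b'\r\n', b'\n')


def identity(data, cmd, **kwargs):
    return data


filterpats = [
    (lambda f: f.endswith(b'.txt'), to_lf, b'to-lf'),
    (lambda f: True, identity, b''),  # catch-all
]


def run_filters(filterpats, filename, data):
    for mf, fn, cmd in filterpats:
        if mf(filename):
            data = fn(data, cmd)
            break  # first match wins
    return data


assert run_filters(filterpats, b'a.txt', b'x\r\ny') == b'x\ny'
assert run_filters(filterpats, b'a.bin', b'x\r\ny') == b'x\r\ny'
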
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

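# Editor's illustration (not part of this module): how the `flags` bytes are
# interpreted by wwrite(). b'l' means "write a symlink whose target is the
# data"; otherwise the data is written as a regular file and b'x' toggles the
# executable bit.

def describe_flags(flags):
    if b'l' in flags:
        return 'symlink (data is the link target)'
    if b'x' in flags:
        return 'regular file, executable'
    return 'regular file'


assert describe_flags(b'l') == 'symlink (data is the link target)'
assert describe_flags(b'x') == 'regular file, executable'
assert describe_flags(b'') == 'regular file'
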
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup application, but that fails
        # to cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current focus
        # is on the behavior of the feature so we disable it by default. The
        # flag will be removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
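        # An illustrative sketch (not part of this module) of how an external
        # hook might consume that file; ``parse_tag_changes`` is a
        # hypothetical helper name:
        #
        #   def parse_tag_changes(data):
        #       # each line is b"<action> <hex-node> <tag-name>"
        #       for line in data.splitlines():
        #           action, node, name = line.split(b' ', 2)
        #           yield action, node, name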
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                assert repo is not None  # help pytype
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file with the
                        # transaction because we need it to still exist when
                        # the transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            assert repo is not None  # help pytype

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                if singleheadsub.get(b"public-changes-only", False):
                    filtername = b"immutable"
                else:
                    filtername = b"visible"
                scmutil.enforcesinglehead(
                    repo, tr2, desc, accountclosed, filtername
                )
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because in-memory
                # changes aren't written out when closing the transaction if
                # tr.addfilegenerator (via dirstate.write or so) isn't invoked
                # while the transaction is running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As the fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

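        # An illustrative sketch (not part of this module): because txnid and
        # txnname are placed in tr.hookargs above, an in-process hook can read
        # them as keyword arguments. ``myhook`` is a hypothetical name:
        #
        #   def myhook(ui, repo, txnid=None, txnname=None, **kwargs):
        #       ui.write(b'closing %s (%s)\n' % (txnname, txnid))
        #
        # wired up with: ui.setconfig(b'hooks', b'pretxnclose.demo', myhook)
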
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run"""
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                assert repo is not None  # help pytype

                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            repo = reporef()
            assert repo is not None  # help pytype
            repo._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted"""
            repo = reporef()
            assert repo is not None  # help pytype
            repo.hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

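    # An illustrative sketch (not part of this module) of the expected calling
    # convention, assuming ``repo`` is a localrepository instance (the same
    # pattern commit() uses below with ``with self.transaction(b'commit')``):
    #
    #   with repo.wlock(), repo.lock(), repo.transaction(b'demo') as tr:
    #       ...  # mutate the store; an exception triggers the txnabort hooks
    #
    # Nested calls return tr.nest(), so inner code can open a "transaction"
    # without knowing whether one is already running.
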
    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))

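    # An illustrative sketch (not part of this module): undoname() maps each
    # journal file to its post-transaction undo twin, which is what rollback()
    # later consumes:
    #
    #   for vfs, name in repo._journalfiles():
    #       undoname(name)  # e.g. journal.dirstate -> undo.dirstate
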
    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = {
                    b'': self.svfs,
                    b'plain': self.vfs,
                }
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self)

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

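    # An illustrative sketch (not part of this module): callers can probe a
    # rollback without performing it, since dryrun returns right after the
    # status message (0 is the success return code):
    #
    #   if repo.rollback(dryrun=True) == 0:
    #       ...  # an undo target exists and a real rollback would proceed
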
    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            assert repo is not None  # help pytype
            repo.updatecaches(tr)

        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False, caches=None):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.

        The `full` argument can take a special "post-clone" value. In this case
        the cache warming is made after a clone, and some of the slower caches
        might be skipped, namely the `.fnodetags` one. This argument is 5.8
        specific as we plan for a cleaner way to deal with this for 5.9.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        unfi = self.unfiltered()

        if full:
            msg = (
                "`full` argument for `repo.updatecaches` is deprecated\n"
                "(use `caches=repository.CACHES_ALL` instead)"
            )
            self.ui.deprecwarn(msg, b"5.9")
            if full == b"post-clone":
                caches = repository.CACHES_POST_CLONE
            else:
                caches = repository.CACHES_ALL
        elif caches is None:
            caches = repository.CACHES_DEFAULT

        if repository.CACHE_BRANCHMAP_SERVED in caches:
            if tr is None or tr.changes[b'origrepolen'] < len(self):
                # accessing the 'served' branchmap should refresh all the others,
                self.ui.debug(b'updating the branch cache\n')
                self.filtered(b'served').branchmap()
                self.filtered(b'served.hidden').branchmap()

        if repository.CACHE_CHANGELOG_CACHE in caches:
            self.changelog.update_caches(transaction=tr)

        if repository.CACHE_MANIFESTLOG_CACHE in caches:
            self.manifestlog.update_caches(transaction=tr)

        if repository.CACHE_REV_BRANCH in caches:
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

        if repository.CACHE_FULL_MANIFEST in caches:
            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

        if repository.CACHE_FILE_NODE_TAGS in caches:
            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())

        if repository.CACHE_TAGS_DEFAULT in caches:
            # accessing tags warms the cache
            self.tags()
        if repository.CACHE_TAGS_SERVED in caches:
            self.filtered(b'served').tags()

        if repository.CACHE_BRANCHMAP_ALL in caches:
            # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
            # so we're forcing a write to cause these caches to be warmed up
            # even if they haven't explicitly been requested yet (if they've
            # never been used by hg, they won't ever have been written, even if
            # they're a subset of another kind of cache that *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)

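    # An illustrative sketch (not part of this module): the CACHE_* constants
    # checked above are defined in interfaces/repository.py and ``caches`` is
    # just a collection of them, so a caller can warm a specific subset:
    #
    #   repo.updatecaches(caches={repository.CACHE_TAGS_DEFAULT})
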
    def invalidatecaches(self):
        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        """Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state)."""
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        """Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        """
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        """Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes."""
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
    ):
        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)

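    # An illustrative sketch (not part of this module): the callback receives
    # a single success flag, and runs immediately when no lock is held.
    # ``_report`` is a hypothetical name:
    #
    #   def _report(success):
    #       ...  # runs once the outermost lock is released
    #
    #   repo._afterlock(_report)
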
    def lock(self, wait=True):
        """Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        """Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._wlockref() if self._wlockref else None
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
        )
        self._wlockref = weakref.ref(l)
        return l

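    # An illustrative sketch (not part of this module) of the documented lock
    # ordering, wlock strictly before lock:
    #
    #   with repo.wlock():  # working copy first
    #       with repo.lock():  # then the store
    #           ...  # safe to touch both .hg/ and .hg/store
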
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.InputError(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

3145 if merge and cctx.deleted():
3143 if merge and cctx.deleted():
3146 raise error.Abort(_(b"cannot commit merge with missing files"))
3144 raise error.Abort(_(b"cannot commit merge with missing files"))
3147
3145
3148 if editor:
3146 if editor:
3149 cctx._text = editor(self, cctx, subs)
3147 cctx._text = editor(self, cctx, subs)
3150 edited = text != cctx._text
3148 edited = text != cctx._text
3151
3149
3152 # Save commit message in case this transaction gets rolled back
3150 # Save commit message in case this transaction gets rolled back
3153 # (e.g. by a pretxncommit hook). Leave the content alone on
3151 # (e.g. by a pretxncommit hook). Leave the content alone on
3154 # the assumption that the user will use the same editor again.
3152 # the assumption that the user will use the same editor again.
3155 msgfn = self.savecommitmessage(cctx._text)
3153 msgfn = self.savecommitmessage(cctx._text)
3156
3154
3157 # commit subs and write new state
3155 # commit subs and write new state
3158 if subs:
3156 if subs:
3159 uipathfn = scmutil.getuipathfn(self)
3157 uipathfn = scmutil.getuipathfn(self)
3160 for s in sorted(commitsubs):
3158 for s in sorted(commitsubs):
3161 sub = wctx.sub(s)
3159 sub = wctx.sub(s)
3162 self.ui.status(
3160 self.ui.status(
3163 _(b'committing subrepository %s\n')
3161 _(b'committing subrepository %s\n')
3164 % uipathfn(subrepoutil.subrelpath(sub))
3162 % uipathfn(subrepoutil.subrelpath(sub))
3165 )
3163 )
3166 sr = sub.commit(cctx._text, user, date)
3164 sr = sub.commit(cctx._text, user, date)
3167 newstate[s] = (newstate[s][0], sr)
3165 newstate[s] = (newstate[s][0], sr)
3168 subrepoutil.writestate(self, newstate)
3166 subrepoutil.writestate(self, newstate)
3169
3167
3170 p1, p2 = self.dirstate.parents()
3168 p1, p2 = self.dirstate.parents()
3171 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3169 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3172 try:
3170 try:
3173 self.hook(
3171 self.hook(
3174 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3172 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3175 )
3173 )
3176 with self.transaction(b'commit'):
3174 with self.transaction(b'commit'):
3177 ret = self.commitctx(cctx, True)
3175 ret = self.commitctx(cctx, True)
3178 # update bookmarks, dirstate and mergestate
3176 # update bookmarks, dirstate and mergestate
3179 bookmarks.update(self, [p1, p2], ret)
3177 bookmarks.update(self, [p1, p2], ret)
3180 cctx.markcommitted(ret)
3178 cctx.markcommitted(ret)
3181 ms.reset()
3179 ms.reset()
3182 except: # re-raises
3180 except: # re-raises
3183 if edited:
3181 if edited:
3184 self.ui.write(
3182 self.ui.write(
3185 _(b'note: commit message saved in %s\n') % msgfn
3183 _(b'note: commit message saved in %s\n') % msgfn
3186 )
3184 )
3187 self.ui.write(
3185 self.ui.write(
3188 _(
3186 _(
3189 b"note: use 'hg commit --logfile "
3187 b"note: use 'hg commit --logfile "
3190 b".hg/last-message.txt --edit' to reuse it\n"
3188 b".hg/last-message.txt --edit' to reuse it\n"
3191 )
3189 )
3192 )
3190 )
3193 raise
3191 raise
3194
3192
3195 def commithook(unused_success):
3193 def commithook(unused_success):
3196 # hack for commands that use a temporary commit (e.g. histedit):
3194 # hack for commands that use a temporary commit (e.g. histedit):
3197 # the temporary commit may already be stripped before the hook runs
3195 # the temporary commit may already be stripped before the hook runs
3198 if self.changelog.hasnode(ret):
3196 if self.changelog.hasnode(ret):
3199 self.hook(
3197 self.hook(
3200 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3198 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3201 )
3199 )
3202
3200
3203 self._afterlock(commithook)
3201 self._afterlock(commithook)
3204 return ret
3202 return ret
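The commithook above is queued rather than run inline: it fires only after the wlock is released, and it re-checks hasnode() because a temporary commit (e.g. from histedit) may have been stripped in the meantime. A minimal, self-contained sketch of this after-lock callback pattern (the class and method names here are invented for illustration, not Mercurial's actual lock API):

class LockLike(object):
    """Toy stand-in for the lock behind _afterlock() above."""

    def __init__(self):
        self._callbacks = []

    def afterlock(self, callback):
        # queue work to run once the lock is finally released
        self._callbacks.append(callback)

    def release(self, success=True):
        callbacks, self._callbacks = self._callbacks, []
        for cb in callbacks:
            cb(success)

lock = LockLike()
lock.afterlock(lambda ok: print("commit hook runs now, success=%r" % ok))
lock.release()  # -> commit hook runs now, success=True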
3205
3203
3206 @unfilteredmethod
3204 @unfilteredmethod
3207 def commitctx(self, ctx, error=False, origctx=None):
3205 def commitctx(self, ctx, error=False, origctx=None):
3208 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3206 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3209
3207
3210 @unfilteredmethod
3208 @unfilteredmethod
3211 def destroying(self):
3209 def destroying(self):
3212 """Inform the repository that nodes are about to be destroyed.
3210 """Inform the repository that nodes are about to be destroyed.
3213 Intended for use by strip and rollback, so there's a common
3211 Intended for use by strip and rollback, so there's a common
3214 place for anything that has to be done before destroying history.
3212 place for anything that has to be done before destroying history.
3215
3213
3216 This is mostly useful for saving state that is in memory and waiting
3214 This is mostly useful for saving state that is in memory and waiting
3217 to be flushed when the current lock is released. Because a call to
3215 to be flushed when the current lock is released. Because a call to
3218 destroyed is imminent, the repo will be invalidated, causing those
3216 destroyed is imminent, the repo will be invalidated, causing those
3219 changes either to linger in memory (waiting for the next unlock) or to
3217 changes either to linger in memory (waiting for the next unlock) or to
3220 vanish completely.
3218 vanish completely.
3221 """
3219 """
3222 # When using the same lock to commit and strip, the phasecache is left
3220 # When using the same lock to commit and strip, the phasecache is left
3223 # dirty after committing. Then when we strip, the repo is invalidated,
3221 # dirty after committing. Then when we strip, the repo is invalidated,
3224 # causing those changes to disappear.
3222 # causing those changes to disappear.
3225 if '_phasecache' in vars(self):
3223 if '_phasecache' in vars(self):
3226 self._phasecache.write()
3224 self._phasecache.write()
3227
3225
3228 @unfilteredmethod
3226 @unfilteredmethod
3229 def destroyed(self):
3227 def destroyed(self):
3230 """Inform the repository that nodes have been destroyed.
3228 """Inform the repository that nodes have been destroyed.
3231 Intended for use by strip and rollback, so there's a common
3229 Intended for use by strip and rollback, so there's a common
3232 place for anything that has to be done after destroying history.
3230 place for anything that has to be done after destroying history.
3233 """
3231 """
3234 # When one tries to:
3232 # When one tries to:
3235 # 1) destroy nodes thus calling this method (e.g. strip)
3233 # 1) destroy nodes thus calling this method (e.g. strip)
3236 # 2) use phasecache somewhere (e.g. commit)
3234 # 2) use phasecache somewhere (e.g. commit)
3237 #
3235 #
3238 # then 2) will fail because the phasecache contains nodes that were
3236 # then 2) will fail because the phasecache contains nodes that were
3239 # removed. We can either remove phasecache from the filecache,
3237 # removed. We can either remove phasecache from the filecache,
3240 # causing it to reload next time it is accessed, or simply filter
3238 # causing it to reload next time it is accessed, or simply filter
3241 # the removed nodes now and write the updated cache.
3239 # the removed nodes now and write the updated cache.
3242 self._phasecache.filterunknown(self)
3240 self._phasecache.filterunknown(self)
3243 self._phasecache.write()
3241 self._phasecache.write()
3244
3242
3245 # refresh all repository caches
3243 # refresh all repository caches
3246 self.updatecaches()
3244 self.updatecaches()
3247
3245
3248 # Ensure the persistent tag cache is updated. Doing it now
3246 # Ensure the persistent tag cache is updated. Doing it now
3249 # means that the tag cache only has to worry about destroyed
3247 # means that the tag cache only has to worry about destroyed
3250 # heads immediately after a strip/rollback. That in turn
3248 # heads immediately after a strip/rollback. That in turn
3251 # guarantees that "cachetip == currenttip" (comparing both rev
3249 # guarantees that "cachetip == currenttip" (comparing both rev
3252 # and node) always means no nodes have been added or destroyed.
3250 # and node) always means no nodes have been added or destroyed.
3253
3251
3254 # XXX this is suboptimal when qrefresh'ing: we strip the current
3252 # XXX this is suboptimal when qrefresh'ing: we strip the current
3255 # head, refresh the tag cache, then immediately add a new head.
3253 # head, refresh the tag cache, then immediately add a new head.
3256 # But I think doing it this way is necessary for the "instant
3254 # But I think doing it this way is necessary for the "instant
3257 # tag cache retrieval" case to work.
3255 # tag cache retrieval" case to work.
3258 self.invalidate()
3256 self.invalidate()
3259
3257
3260 def status(
3258 def status(
3261 self,
3259 self,
3262 node1=b'.',
3260 node1=b'.',
3263 node2=None,
3261 node2=None,
3264 match=None,
3262 match=None,
3265 ignored=False,
3263 ignored=False,
3266 clean=False,
3264 clean=False,
3267 unknown=False,
3265 unknown=False,
3268 listsubrepos=False,
3266 listsubrepos=False,
3269 ):
3267 ):
3270 '''a convenience method that calls node1.status(node2)'''
3268 '''a convenience method that calls node1.status(node2)'''
3271 return self[node1].status(
3269 return self[node1].status(
3272 node2, match, ignored, clean, unknown, listsubrepos
3270 node2, match, ignored, clean, unknown, listsubrepos
3273 )
3271 )
3274
3272
3275 def addpostdsstatus(self, ps):
3273 def addpostdsstatus(self, ps):
3276 """Add a callback to run within the wlock, at the point at which status
3274 """Add a callback to run within the wlock, at the point at which status
3277 fixups happen.
3275 fixups happen.
3278
3276
3279 On status completion, callback(wctx, status) will be called with the
3277 On status completion, callback(wctx, status) will be called with the
3280 wlock held, unless the dirstate has changed from underneath or the wlock
3278 wlock held, unless the dirstate has changed from underneath or the wlock
3281 couldn't be grabbed.
3279 couldn't be grabbed.
3282
3280
3283 Callbacks should not capture and use a cached copy of the dirstate --
3281 Callbacks should not capture and use a cached copy of the dirstate --
3284 it might change in the meanwhile. Instead, they should access the
3282 it might change in the meanwhile. Instead, they should access the
3285 dirstate via wctx.repo().dirstate.
3283 dirstate via wctx.repo().dirstate.
3286
3284
3287 This list is emptied out after each status run -- extensions should
3285 This list is emptied out after each status run -- extensions should
3288 make sure they add to this list each time dirstate.status is called.
3286 make sure they add to this list each time dirstate.status is called.
3289 Extensions should also make sure they don't call this for statuses
3287 Extensions should also make sure they don't call this for statuses
3290 that don't involve the dirstate.
3288 that don't involve the dirstate.
3291 """
3289 """
3292
3290
3293 # The list is located here for uniqueness reasons -- it is actually
3291 # The list is located here for uniqueness reasons -- it is actually
3294 # managed by the workingctx, but that isn't unique per-repo.
3292 # managed by the workingctx, but that isn't unique per-repo.
3295 self._postdsstatus.append(ps)
3293 self._postdsstatus.append(ps)
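A hypothetical extension snippet using the API above (the callback body is invented; the real constraints are the documented ones: fetch the dirstate through wctx.repo() rather than capturing it, and re-register before every status run since the list is emptied each time):

def _poststatus(wctx, status):
    # access the dirstate via the repo, never a cached copy (see docstring)
    repo = wctx.repo()
    repo.ui.debug(b'%d modified file(s) after status fixups\n'
                  % len(status.modified))

def queue_poststatus(repo):
    # must be re-registered before each dirstate-backed status call
    repo.addpostdsstatus(_poststatus)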
3296
3294
3297 def postdsstatus(self):
3295 def postdsstatus(self):
3298 """Used by workingctx to get the list of post-dirstate-status hooks."""
3296 """Used by workingctx to get the list of post-dirstate-status hooks."""
3299 return self._postdsstatus
3297 return self._postdsstatus
3300
3298
3301 def clearpostdsstatus(self):
3299 def clearpostdsstatus(self):
3302 """Used by workingctx to clear post-dirstate-status hooks."""
3300 """Used by workingctx to clear post-dirstate-status hooks."""
3303 del self._postdsstatus[:]
3301 del self._postdsstatus[:]
3304
3302
3305 def heads(self, start=None):
3303 def heads(self, start=None):
3306 if start is None:
3304 if start is None:
3307 cl = self.changelog
3305 cl = self.changelog
3308 headrevs = reversed(cl.headrevs())
3306 headrevs = reversed(cl.headrevs())
3309 return [cl.node(rev) for rev in headrevs]
3307 return [cl.node(rev) for rev in headrevs]
3310
3308
3311 heads = self.changelog.heads(start)
3309 heads = self.changelog.heads(start)
3312 # sort the output in rev descending order
3310 # sort the output in rev descending order
3313 return sorted(heads, key=self.changelog.rev, reverse=True)
3311 return sorted(heads, key=self.changelog.rev, reverse=True)
3314
3312
3315 def branchheads(self, branch=None, start=None, closed=False):
3313 def branchheads(self, branch=None, start=None, closed=False):
3316 """return a (possibly filtered) list of heads for the given branch
3314 """return a (possibly filtered) list of heads for the given branch
3317
3315
3318 Heads are returned in topological order, from newest to oldest.
3316 Heads are returned in topological order, from newest to oldest.
3319 If branch is None, use the dirstate branch.
3317 If branch is None, use the dirstate branch.
3320 If start is not None, return only heads reachable from start.
3318 If start is not None, return only heads reachable from start.
3321 If closed is True, return heads that are marked as closed as well.
3319 If closed is True, return heads that are marked as closed as well.
3322 """
3320 """
3323 if branch is None:
3321 if branch is None:
3324 branch = self[None].branch()
3322 branch = self[None].branch()
3325 branches = self.branchmap()
3323 branches = self.branchmap()
3326 if not branches.hasbranch(branch):
3324 if not branches.hasbranch(branch):
3327 return []
3325 return []
3328 # the cache returns heads ordered lowest to highest
3326 # the cache returns heads ordered lowest to highest
3329 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3327 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3330 if start is not None:
3328 if start is not None:
3331 # filter out the heads that cannot be reached from startrev
3329 # filter out the heads that cannot be reached from startrev
3332 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3330 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3333 bheads = [h for h in bheads if h in fbheads]
3331 bheads = [h for h in bheads if h in fbheads]
3334 return bheads
3332 return bheads
3335
3333
3336 def branches(self, nodes):
3334 def branches(self, nodes):
3337 if not nodes:
3335 if not nodes:
3338 nodes = [self.changelog.tip()]
3336 nodes = [self.changelog.tip()]
3339 b = []
3337 b = []
3340 for n in nodes:
3338 for n in nodes:
3341 t = n
3339 t = n
3342 while True:
3340 while True:
3343 p = self.changelog.parents(n)
3341 p = self.changelog.parents(n)
3344 if p[1] != self.nullid or p[0] == self.nullid:
3342 if p[1] != self.nullid or p[0] == self.nullid:
3345 b.append((t, n, p[0], p[1]))
3343 b.append((t, n, p[0], p[1]))
3346 break
3344 break
3347 n = p[0]
3345 n = p[0]
3348 return b
3346 return b
3349
3347
3350 def between(self, pairs):
3348 def between(self, pairs):
3351 r = []
3349 r = []
3352
3350
3353 for top, bottom in pairs:
3351 for top, bottom in pairs:
3354 n, l, i = top, [], 0
3352 n, l, i = top, [], 0
3355 f = 1
3353 f = 1
3356
3354
3357 while n != bottom and n != self.nullid:
3355 while n != bottom and n != self.nullid:
3358 p = self.changelog.parents(n)[0]
3356 p = self.changelog.parents(n)[0]
3359 if i == f:
3357 if i == f:
3360 l.append(n)
3358 l.append(n)
3361 f = f * 2
3359 f = f * 2
3362 n = p
3360 n = p
3363 i += 1
3361 i += 1
3364
3362
3365 r.append(l)
3363 r.append(l)
3366
3364
3367 return r
3365 return r
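between() samples the first-parent chain at exponentially growing distances below each top (1, 2, 4, 8, ... steps), keeping the result logarithmic in the chain length. A self-contained toy over a linear chain, assuming parent_of returns the single (first) parent:

def between_one(parent_of, top, bottom, null=None):
    sampled, n, i, f = [], top, 0, 1
    while n != bottom and n != null:
        p = parent_of(n)
        if i == f:            # distance from top is a power of two
            sampled.append(n)
            f *= 2
        n = p
        i += 1
    return sampled

# chain 10 -> 9 -> ... -> 0, where the parent of k is k - 1:
print(between_one(lambda k: k - 1, 10, 0))  # [9, 8, 6, 2]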
3368
3366
3369 def checkpush(self, pushop):
3367 def checkpush(self, pushop):
3370 """Extensions can override this function if additional checks have
3368 """Extensions can override this function if additional checks have
3371 to be performed before pushing, or call it if they override the push
3369 to be performed before pushing, or call it if they override the push
3372 command.
3370 command.
3373 """
3371 """
3374
3372
3375 @unfilteredpropertycache
3373 @unfilteredpropertycache
3376 def prepushoutgoinghooks(self):
3374 def prepushoutgoinghooks(self):
3377 """Return util.hooks consists of a pushop with repo, remote, outgoing
3375 """Return util.hooks consists of a pushop with repo, remote, outgoing
3378 methods, which are called before pushing changesets.
3376 methods, which are called before pushing changesets.
3379 """
3377 """
3380 return util.hooks()
3378 return util.hooks()
3381
3379
3382 def pushkey(self, namespace, key, old, new):
3380 def pushkey(self, namespace, key, old, new):
3383 try:
3381 try:
3384 tr = self.currenttransaction()
3382 tr = self.currenttransaction()
3385 hookargs = {}
3383 hookargs = {}
3386 if tr is not None:
3384 if tr is not None:
3387 hookargs.update(tr.hookargs)
3385 hookargs.update(tr.hookargs)
3388 hookargs = pycompat.strkwargs(hookargs)
3386 hookargs = pycompat.strkwargs(hookargs)
3389 hookargs['namespace'] = namespace
3387 hookargs['namespace'] = namespace
3390 hookargs['key'] = key
3388 hookargs['key'] = key
3391 hookargs['old'] = old
3389 hookargs['old'] = old
3392 hookargs['new'] = new
3390 hookargs['new'] = new
3393 self.hook(b'prepushkey', throw=True, **hookargs)
3391 self.hook(b'prepushkey', throw=True, **hookargs)
3394 except error.HookAbort as exc:
3392 except error.HookAbort as exc:
3395 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3393 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3396 if exc.hint:
3394 if exc.hint:
3397 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3395 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3398 return False
3396 return False
3399 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3397 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3400 ret = pushkey.push(self, namespace, key, old, new)
3398 ret = pushkey.push(self, namespace, key, old, new)
3401
3399
3402 def runhook(unused_success):
3400 def runhook(unused_success):
3403 self.hook(
3401 self.hook(
3404 b'pushkey',
3402 b'pushkey',
3405 namespace=namespace,
3403 namespace=namespace,
3406 key=key,
3404 key=key,
3407 old=old,
3405 old=old,
3408 new=new,
3406 new=new,
3409 ret=ret,
3407 ret=ret,
3410 )
3408 )
3411
3409
3412 self._afterlock(runhook)
3410 self._afterlock(runhook)
3413 return ret
3411 return ret
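As the code above shows, a prepushkey hook can veto the update: HookAbort is caught and reported, and pushkey() returns False. A hypothetical in-process hook (wired via something like "[hooks] prepushkey.nobookmarks = python:/path/to/hooks.py:rejectnewbookmarks" in hgrc) that refuses brand-new bookmarks; the keyword arguments mirror the hookargs populated above:

def rejectnewbookmarks(ui, repo, hooktype, namespace=None, key=None,
                       old=None, new=None, **kwargs):
    if namespace == b'bookmarks' and not old:
        ui.warn(b'refusing new bookmark %s\n' % key)
        return True  # truthy return fails the hook -> pushkey is aborted
    return False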
3414
3412
3415 def listkeys(self, namespace):
3413 def listkeys(self, namespace):
3416 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3414 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3417 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3415 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3418 values = pushkey.list(self, namespace)
3416 values = pushkey.list(self, namespace)
3419 self.hook(b'listkeys', namespace=namespace, values=values)
3417 self.hook(b'listkeys', namespace=namespace, values=values)
3420 return values
3418 return values
3421
3419
3422 def debugwireargs(self, one, two, three=None, four=None, five=None):
3420 def debugwireargs(self, one, two, three=None, four=None, five=None):
3423 '''used to test argument passing over the wire'''
3421 '''used to test argument passing over the wire'''
3424 return b"%s %s %s %s %s" % (
3422 return b"%s %s %s %s %s" % (
3425 one,
3423 one,
3426 two,
3424 two,
3427 pycompat.bytestr(three),
3425 pycompat.bytestr(three),
3428 pycompat.bytestr(four),
3426 pycompat.bytestr(four),
3429 pycompat.bytestr(five),
3427 pycompat.bytestr(five),
3430 )
3428 )
3431
3429
3432 def savecommitmessage(self, text):
3430 def savecommitmessage(self, text):
3433 fp = self.vfs(b'last-message.txt', b'wb')
3431 fp = self.vfs(b'last-message.txt', b'wb')
3434 try:
3432 try:
3435 fp.write(text)
3433 fp.write(text)
3436 finally:
3434 finally:
3437 fp.close()
3435 fp.close()
3438 return self.pathto(fp.name[len(self.root) + 1 :])
3436 return self.pathto(fp.name[len(self.root) + 1 :])
3439
3437
3440 def register_wanted_sidedata(self, category):
3438 def register_wanted_sidedata(self, category):
3441 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3439 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3442 # Only revlogv2 repos can want sidedata.
3440 # Only revlogv2 repos can want sidedata.
3443 return
3441 return
3444 self._wanted_sidedata.add(pycompat.bytestr(category))
3442 self._wanted_sidedata.add(pycompat.bytestr(category))
3445
3443
3446 def register_sidedata_computer(
3444 def register_sidedata_computer(
3447 self, kind, category, keys, computer, flags, replace=False
3445 self, kind, category, keys, computer, flags, replace=False
3448 ):
3446 ):
3449 if kind not in revlogconst.ALL_KINDS:
3447 if kind not in revlogconst.ALL_KINDS:
3450 msg = _(b"unexpected revlog kind '%s'.")
3448 msg = _(b"unexpected revlog kind '%s'.")
3451 raise error.ProgrammingError(msg % kind)
3449 raise error.ProgrammingError(msg % kind)
3452 category = pycompat.bytestr(category)
3450 category = pycompat.bytestr(category)
3453 already_registered = category in self._sidedata_computers.get(kind, [])
3451 already_registered = category in self._sidedata_computers.get(kind, [])
3454 if already_registered and not replace:
3452 if already_registered and not replace:
3455 msg = _(
3453 msg = _(
3456 b"cannot register a sidedata computer twice for category '%s'."
3454 b"cannot register a sidedata computer twice for category '%s'."
3457 )
3455 )
3458 raise error.ProgrammingError(msg % category)
3456 raise error.ProgrammingError(msg % category)
3459 if replace and not already_registered:
3457 if replace and not already_registered:
3460 msg = _(
3458 msg = _(
3461 b"cannot replace a sidedata computer that isn't registered "
3459 b"cannot replace a sidedata computer that isn't registered "
3462 b"for category '%s'."
3460 b"for category '%s'."
3463 )
3461 )
3464 raise error.ProgrammingError(msg % category)
3462 raise error.ProgrammingError(msg % category)
3465 self._sidedata_computers.setdefault(kind, {})
3463 self._sidedata_computers.setdefault(kind, {})
3466 self._sidedata_computers[kind][category] = (keys, computer, flags)
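A hedged sketch of how an extension might use the two registration methods above. The category name and computer body are invented, and the exact call/return convention of a computer is only schematic here; the (keys, computer, flags) shape, the kind check against revlogconst.ALL_KINDS, and the one-registration-per-category rule (unless replace=True) come from the code:

def _compute_mydata(repo, store, rev, prev_sidedata):
    # hypothetical computer body; the precise calling convention is
    # defined by the sidedata helpers and is deliberately elided here
    ...

def setup(repo):
    repo.register_wanted_sidedata(b'exp-mydata')
    repo.register_sidedata_computer(
        revlogconst.KIND_CHANGELOG,  # assumed member of revlogconst.ALL_KINDS
        b'exp-mydata',               # category; re-registering needs replace=True
        (b'exp-mydata',),            # keys this computer may produce
        _compute_mydata,
        0,                           # revision flags advertised
    )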
3464 self._sidedata_computers[kind][category] = (keys, computer, flags)
3467
3465
3468
3466
3469 # used to avoid circular references so destructors work
3467 # used to avoid circular references so destructors work
3470 def aftertrans(files):
3468 def aftertrans(files):
3471 renamefiles = [tuple(t) for t in files]
3469 renamefiles = [tuple(t) for t in files]
3472
3470
3473 def a():
3471 def a():
3474 for vfs, src, dest in renamefiles:
3472 for vfs, src, dest in renamefiles:
3475 # if src and dest refer to the same file, vfs.rename is a no-op,
3473 # if src and dest refer to the same file, vfs.rename is a no-op,
3476 # leaving both src and dest on disk. delete dest to make sure
3474 # leaving both src and dest on disk. delete dest to make sure
3477 # the rename couldn't be such a no-op.
3475 # the rename couldn't be such a no-op.
3478 vfs.tryunlink(dest)
3476 vfs.tryunlink(dest)
3479 try:
3477 try:
3480 vfs.rename(src, dest)
3478 vfs.rename(src, dest)
3481 except OSError as exc: # journal file does not yet exist
3479 except OSError as exc: # journal file does not yet exist
3482 if exc.errno != errno.ENOENT:
3480 if exc.errno != errno.ENOENT:
3483 raise
3481 raise
3484
3482
3485 return a
3483 return a
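The closure returned by aftertrans() deliberately captures only plain tuples, so the transaction holds no reference back to repo objects (the "circular references" comment above). A self-contained sketch of the same pattern using plain os calls:

import os

def aftertrans_like(renames):
    pending = [tuple(t) for t in renames]  # copied by value, no repo refs

    def run():
        for src, dest in pending:
            try:
                os.unlink(dest)  # ensure the rename is never a no-op
            except OSError:
                pass
            try:
                os.rename(src, dest)
            except OSError:
                pass  # journal file does not exist yet
    return run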
3486
3484
3487
3485
3488 def undoname(fn):
3486 def undoname(fn):
3489 base, name = os.path.split(fn)
3487 base, name = os.path.split(fn)
3490 assert name.startswith(b'journal')
3488 assert name.startswith(b'journal')
3491 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3489 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3492
3490
3493
3491
3494 def instance(ui, path, create, intents=None, createopts=None):
3492 def instance(ui, path, create, intents=None, createopts=None):
3495 localpath = urlutil.urllocalpath(path)
3493 localpath = urlutil.urllocalpath(path)
3496 if create:
3494 if create:
3497 createrepository(ui, localpath, createopts=createopts)
3495 createrepository(ui, localpath, createopts=createopts)
3498
3496
3499 return makelocalrepository(ui, localpath, intents=intents)
3497 return makelocalrepository(ui, localpath, intents=intents)
3500
3498
3501
3499
3502 def islocal(path):
3500 def islocal(path):
3503 return True
3501 return True
3504
3502
3505
3503
3506 def defaultcreateopts(ui, createopts=None):
3504 def defaultcreateopts(ui, createopts=None):
3507 """Populate the default creation options for a repository.
3505 """Populate the default creation options for a repository.
3508
3506
3509 A dictionary of explicitly requested creation options can be passed
3507 A dictionary of explicitly requested creation options can be passed
3510 in. Missing keys will be populated.
3508 in. Missing keys will be populated.
3511 """
3509 """
3512 createopts = dict(createopts or {})
3510 createopts = dict(createopts or {})
3513
3511
3514 if b'backend' not in createopts:
3512 if b'backend' not in createopts:
3515 # experimental config: storage.new-repo-backend
3513 # experimental config: storage.new-repo-backend
3516 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3514 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3517
3515
3518 return createopts
3516 return createopts
3519
3517
3520
3518
3521 def clone_requirements(ui, createopts, srcrepo):
3519 def clone_requirements(ui, createopts, srcrepo):
3522 """clone the requirements of a local repo for a local clone
3520 """clone the requirements of a local repo for a local clone
3523
3521
3524 The store requirements are unchanged while the working copy requirements
3522 The store requirements are unchanged while the working copy requirements
3525 depend on the configuration.
3523 depend on the configuration.
3526 """
3524 """
3527 target_requirements = set()
3525 target_requirements = set()
3528 createopts = defaultcreateopts(ui, createopts=createopts)
3526 createopts = defaultcreateopts(ui, createopts=createopts)
3529 for r in newreporequirements(ui, createopts):
3527 for r in newreporequirements(ui, createopts):
3530 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
3528 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
3531 target_requirements.add(r)
3529 target_requirements.add(r)
3532
3530
3533 for r in srcrepo.requirements:
3531 for r in srcrepo.requirements:
3534 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
3532 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
3535 target_requirements.add(r)
3533 target_requirements.add(r)
3536 return target_requirements
3534 return target_requirements
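The split above can be read as two set comprehensions: working-copy requirements come from the freshly computed defaults, everything else is copied from the source repository. An illustrative, self-contained rendering (the WORKING_DIR_REQUIREMENTS value below is a made-up stand-in for the real constant):

WORKING_DIR_REQUIREMENTS = {b'dirstate-v2'}  # stand-in for the real set

def clone_requirements_like(new_defaults, src_requirements):
    target = {r for r in new_defaults if r in WORKING_DIR_REQUIREMENTS}
    target |= {r for r in src_requirements
               if r not in WORKING_DIR_REQUIREMENTS}
    return target

print(sorted(clone_requirements_like(
    {b'dirstate-v2', b'store'}, {b'store', b'revlogv1'})))
# [b'dirstate-v2', b'revlogv1', b'store']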
3537
3535
3538
3536
3539 def newreporequirements(ui, createopts):
3537 def newreporequirements(ui, createopts):
3540 """Determine the set of requirements for a new local repository.
3538 """Determine the set of requirements for a new local repository.
3541
3539
3542 Extensions can wrap this function to specify custom requirements for
3540 Extensions can wrap this function to specify custom requirements for
3543 new repositories.
3541 new repositories.
3544 """
3542 """
3545 # If the repo is being created from a shared repository, we copy
3543 # If the repo is being created from a shared repository, we copy
3546 # its requirements.
3544 # its requirements.
3547 if b'sharedrepo' in createopts:
3545 if b'sharedrepo' in createopts:
3548 requirements = set(createopts[b'sharedrepo'].requirements)
3546 requirements = set(createopts[b'sharedrepo'].requirements)
3549 if createopts.get(b'sharedrelative'):
3547 if createopts.get(b'sharedrelative'):
3550 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3548 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3551 else:
3549 else:
3552 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3550 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3553
3551
3554 return requirements
3552 return requirements
3555
3553
3556 if b'backend' not in createopts:
3554 if b'backend' not in createopts:
3557 raise error.ProgrammingError(
3555 raise error.ProgrammingError(
3558 b'backend key not present in createopts; '
3556 b'backend key not present in createopts; '
3559 b'was defaultcreateopts() called?'
3557 b'was defaultcreateopts() called?'
3560 )
3558 )
3561
3559
3562 if createopts[b'backend'] != b'revlogv1':
3560 if createopts[b'backend'] != b'revlogv1':
3563 raise error.Abort(
3561 raise error.Abort(
3564 _(
3562 _(
3565 b'unable to determine repository requirements for '
3563 b'unable to determine repository requirements for '
3566 b'storage backend: %s'
3564 b'storage backend: %s'
3567 )
3565 )
3568 % createopts[b'backend']
3566 % createopts[b'backend']
3569 )
3567 )
3570
3568
3571 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3569 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3572 if ui.configbool(b'format', b'usestore'):
3570 if ui.configbool(b'format', b'usestore'):
3573 requirements.add(requirementsmod.STORE_REQUIREMENT)
3571 requirements.add(requirementsmod.STORE_REQUIREMENT)
3574 if ui.configbool(b'format', b'usefncache'):
3572 if ui.configbool(b'format', b'usefncache'):
3575 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3573 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3576 if ui.configbool(b'format', b'dotencode'):
3574 if ui.configbool(b'format', b'dotencode'):
3577 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3575 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3578
3576
3579 compengines = ui.configlist(b'format', b'revlog-compression')
3577 compengines = ui.configlist(b'format', b'revlog-compression')
3580 for compengine in compengines:
3578 for compengine in compengines:
3581 if compengine in util.compengines:
3579 if compengine in util.compengines:
3582 engine = util.compengines[compengine]
3580 engine = util.compengines[compengine]
3583 if engine.available() and engine.revlogheader():
3581 if engine.available() and engine.revlogheader():
3584 break
3582 break
3585 else:
3583 else:
3586 raise error.Abort(
3584 raise error.Abort(
3587 _(
3585 _(
3588 b'compression engines %s defined by '
3586 b'compression engines %s defined by '
3589 b'format.revlog-compression not available'
3587 b'format.revlog-compression not available'
3590 )
3588 )
3591 % b', '.join(b'"%s"' % e for e in compengines),
3589 % b', '.join(b'"%s"' % e for e in compengines),
3592 hint=_(
3590 hint=_(
3593 b'run "hg debuginstall" to list available '
3591 b'run "hg debuginstall" to list available '
3594 b'compression engines'
3592 b'compression engines'
3595 ),
3593 ),
3596 )
3594 )
3597
3595
3598 # zlib is the historical default and doesn't need an explicit requirement.
3596 # zlib is the historical default and doesn't need an explicit requirement.
3599 if compengine == b'zstd':
3597 if compengine == b'zstd':
3600 requirements.add(b'revlog-compression-zstd')
3598 requirements.add(b'revlog-compression-zstd')
3601 elif compengine != b'zlib':
3599 elif compengine != b'zlib':
3602 requirements.add(b'exp-compression-%s' % compengine)
3600 requirements.add(b'exp-compression-%s' % compengine)
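The engine selection above leans on Python's for/else construct: the else branch runs only when the loop finishes without hitting break, i.e. when no configured engine was usable. A minimal self-contained equivalent:

def pick_engine(configured, available):
    for engine in configured:
        if engine in available:
            break  # first usable engine wins
    else:
        # reached only if no break fired
        raise LookupError('none of %r available' % (configured,))
    return engine

print(pick_engine(['zstd', 'zlib'], {'zlib'}))  # -> 'zlib'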
3603
3601
3604 if scmutil.gdinitconfig(ui):
3602 if scmutil.gdinitconfig(ui):
3605 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3603 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3606 if ui.configbool(b'format', b'sparse-revlog'):
3604 if ui.configbool(b'format', b'sparse-revlog'):
3607 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3605 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3608
3606
3609 # experimental config: format.exp-dirstate-v2
3607 # experimental config: format.exp-dirstate-v2
3610 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3608 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3611 if ui.configbool(b'format', b'exp-dirstate-v2'):
3609 if ui.configbool(b'format', b'exp-dirstate-v2'):
3612 if dirstate.SUPPORTS_DIRSTATE_V2:
3610 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3613 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3614 else:
3615 raise error.Abort(
3616 _(
3617 b"dirstate v2 format requested by config "
3618 b"but not supported (requires Rust extensions)"
3619 )
3620 )
3621
3611
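This hunk drops the Rust-only gate: on the old side (3612-3620) the dirstate-v2 requirement was added only when dirstate.SUPPORTS_DIRSTATE_V2 was true, and an Abort was raised otherwise; on the new side the requirement is added unconditionally. A condensed before/after sketch (constant and config names shortened):

def add_dirstate_v2_before(configbool, requirements, supports_v2):
    if configbool(b'format', b'exp-dirstate-v2'):
        if supports_v2:
            requirements.add(b'dirstate-v2')
        else:
            raise RuntimeError('requires Rust extensions')

def add_dirstate_v2_after(configbool, requirements):
    if configbool(b'format', b'exp-dirstate-v2'):
        requirements.add(b'dirstate-v2')  # no Rust gate any more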
3622 # experimental config: format.exp-use-copies-side-data-changeset
3612 # experimental config: format.exp-use-copies-side-data-changeset
3623 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3613 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3624 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3614 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3625 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3615 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3626 if ui.configbool(b'experimental', b'treemanifest'):
3616 if ui.configbool(b'experimental', b'treemanifest'):
3627 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3617 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3628
3618
3629 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3619 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3630 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3620 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3631 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3621 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3632
3622
3633 revlogv2 = ui.config(b'experimental', b'revlogv2')
3623 revlogv2 = ui.config(b'experimental', b'revlogv2')
3634 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3624 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3635 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3625 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3636 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3626 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3637 # experimental config: format.internal-phase
3627 # experimental config: format.internal-phase
3638 if ui.configbool(b'format', b'internal-phase'):
3628 if ui.configbool(b'format', b'internal-phase'):
3639 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3629 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3640
3630
3641 if createopts.get(b'narrowfiles'):
3631 if createopts.get(b'narrowfiles'):
3642 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3632 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3643
3633
3644 if createopts.get(b'lfs'):
3634 if createopts.get(b'lfs'):
3645 requirements.add(b'lfs')
3635 requirements.add(b'lfs')
3646
3636
3647 if ui.configbool(b'format', b'bookmarks-in-store'):
3637 if ui.configbool(b'format', b'bookmarks-in-store'):
3648 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3638 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3649
3639
3650 if ui.configbool(b'format', b'use-persistent-nodemap'):
3640 if ui.configbool(b'format', b'use-persistent-nodemap'):
3651 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3641 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3652
3642
3653 # if share-safe is enabled, let's create the new repository with the new
3643 # if share-safe is enabled, let's create the new repository with the new
3654 # requirement
3644 # requirement
3655 if ui.configbool(b'format', b'use-share-safe'):
3645 if ui.configbool(b'format', b'use-share-safe'):
3656 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3646 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3657
3647
3658 return requirements
3648 return requirements
3659
3649
3660
3650
3661 def checkrequirementscompat(ui, requirements):
3651 def checkrequirementscompat(ui, requirements):
3662 """Checks compatibility of repository requirements enabled and disabled.
3652 """Checks compatibility of repository requirements enabled and disabled.
3663
3653
3664 Returns a set of requirements which need to be dropped because dependent
3654 Returns a set of requirements which need to be dropped because dependent
3665 requirements are not enabled. Also warns the user about it."""
3655 requirements are not enabled. Also warns the user about it."""
3666
3656
3667 dropped = set()
3657 dropped = set()
3668
3658
3669 if requirementsmod.STORE_REQUIREMENT not in requirements:
3659 if requirementsmod.STORE_REQUIREMENT not in requirements:
3670 if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3660 if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3671 ui.warn(
3661 ui.warn(
3672 _(
3662 _(
3673 b'ignoring enabled \'format.bookmarks-in-store\' config '
3663 b'ignoring enabled \'format.bookmarks-in-store\' config '
3674 b'because it is incompatible with disabled '
3664 b'because it is incompatible with disabled '
3675 b'\'format.usestore\' config\n'
3665 b'\'format.usestore\' config\n'
3676 )
3666 )
3677 )
3667 )
3678 dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3668 dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3679
3669
3680 if (
3670 if (
3681 requirementsmod.SHARED_REQUIREMENT in requirements
3671 requirementsmod.SHARED_REQUIREMENT in requirements
3682 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3672 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3683 ):
3673 ):
3684 raise error.Abort(
3674 raise error.Abort(
3685 _(
3675 _(
3686 b"cannot create shared repository as source was created"
3676 b"cannot create shared repository as source was created"
3687 b" with 'format.usestore' config disabled"
3677 b" with 'format.usestore' config disabled"
3688 )
3678 )
3689 )
3679 )
3690
3680
3691 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3681 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3692 ui.warn(
3682 ui.warn(
3693 _(
3683 _(
3694 b"ignoring enabled 'format.use-share-safe' config because "
3684 b"ignoring enabled 'format.use-share-safe' config because "
3695 b"it is incompatible with disabled 'format.usestore'"
3685 b"it is incompatible with disabled 'format.usestore'"
3696 b" config\n"
3686 b" config\n"
3697 )
3687 )
3698 )
3688 )
3699 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3689 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3700
3690
3701 return dropped
3691 return dropped
3702
3692
3703
3693
3704 def filterknowncreateopts(ui, createopts):
3694 def filterknowncreateopts(ui, createopts):
3705 """Filters a dict of repo creation options against options that are known.
3695 """Filters a dict of repo creation options against options that are known.
3706
3696
3707 Receives a dict of repo creation options and returns a dict of those
3697 Receives a dict of repo creation options and returns a dict of those
3708 options that we don't know how to handle.
3698 options that we don't know how to handle.
3709
3699
3710 This function is called as part of repository creation. If the
3700 This function is called as part of repository creation. If the
3711 returned dict contains any items, repository creation will not
3701 returned dict contains any items, repository creation will not
3712 be allowed, as it means there was a request to create a repository
3702 be allowed, as it means there was a request to create a repository
3713 with options not recognized by loaded code.
3703 with options not recognized by loaded code.
3714
3704
3715 Extensions can wrap this function to filter out creation options
3705 Extensions can wrap this function to filter out creation options
3716 they know how to handle.
3706 they know how to handle.
3717 """
3707 """
3718 known = {
3708 known = {
3719 b'backend',
3709 b'backend',
3720 b'lfs',
3710 b'lfs',
3721 b'narrowfiles',
3711 b'narrowfiles',
3722 b'sharedrepo',
3712 b'sharedrepo',
3723 b'sharedrelative',
3713 b'sharedrelative',
3724 b'shareditems',
3714 b'shareditems',
3725 b'shallowfilestore',
3715 b'shallowfilestore',
3726 }
3716 }
3727
3717
3728 return {k: v for k, v in createopts.items() if k not in known}
3718 return {k: v for k, v in createopts.items() if k not in known}
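Per the docstring above, an extension claims a creation option by wrapping this function and removing the options it knows how to handle from the returned dict. A hypothetical wrapper (the option name is invented; extensions.wrapfunction is the same helper used elsewhere in Mercurial, including in the test extension later in this changeset):

from mercurial import extensions, localrepo

def _filterknown(orig, ui, createopts):
    unknown = orig(ui, createopts)
    # claim our (hypothetical) option so creation is allowed to proceed
    unknown.pop(b'myext-layout', None)
    return unknown

def extsetup(ui):
    extensions.wrapfunction(localrepo, 'filterknowncreateopts', _filterknown)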
3729
3719
3730
3720
3731 def createrepository(ui, path, createopts=None, requirements=None):
3721 def createrepository(ui, path, createopts=None, requirements=None):
3732 """Create a new repository in a vfs.
3722 """Create a new repository in a vfs.
3733
3723
3734 ``path`` path to the new repo's working directory.
3724 ``path`` path to the new repo's working directory.
3735 ``createopts`` options for the new repository.
3725 ``createopts`` options for the new repository.
3736 ``requirements`` predefined set of requirements.
3726 ``requirements`` predefined set of requirements.
3737 (incompatible with ``createopts``)
3727 (incompatible with ``createopts``)
3738
3728
3739 The following keys for ``createopts`` are recognized:
3729 The following keys for ``createopts`` are recognized:
3740
3730
3741 backend
3731 backend
3742 The storage backend to use.
3732 The storage backend to use.
3743 lfs
3733 lfs
3744 Repository will be created with ``lfs`` requirement. The lfs extension
3734 Repository will be created with ``lfs`` requirement. The lfs extension
3745 will automatically be loaded when the repository is accessed.
3735 will automatically be loaded when the repository is accessed.
3746 narrowfiles
3736 narrowfiles
3747 Set up repository to support narrow file storage.
3737 Set up repository to support narrow file storage.
3748 sharedrepo
3738 sharedrepo
3749 Repository object from which storage should be shared.
3739 Repository object from which storage should be shared.
3750 sharedrelative
3740 sharedrelative
3751 Boolean indicating if the path to the shared repo should be
3741 Boolean indicating if the path to the shared repo should be
3752 stored as relative. By default, the pointer to the "parent" repo
3742 stored as relative. By default, the pointer to the "parent" repo
3753 is stored as an absolute path.
3743 is stored as an absolute path.
3754 shareditems
3744 shareditems
3755 Set of items to share to the new repository (in addition to storage).
3745 Set of items to share to the new repository (in addition to storage).
3756 shallowfilestore
3746 shallowfilestore
3757 Indicates that storage for files should be shallow (not all ancestor
3747 Indicates that storage for files should be shallow (not all ancestor
3758 revisions are known).
3748 revisions are known).
3759 """
3749 """
3760
3750
3761 if requirements is not None:
3751 if requirements is not None:
3762 if createopts is not None:
3752 if createopts is not None:
3763 msg = b'cannot specify both createopts and requirements'
3753 msg = b'cannot specify both createopts and requirements'
3764 raise error.ProgrammingError(msg)
3754 raise error.ProgrammingError(msg)
3765 createopts = {}
3755 createopts = {}
3766 else:
3756 else:
3767 createopts = defaultcreateopts(ui, createopts=createopts)
3757 createopts = defaultcreateopts(ui, createopts=createopts)
3768
3758
3769 unknownopts = filterknowncreateopts(ui, createopts)
3759 unknownopts = filterknowncreateopts(ui, createopts)
3770
3760
3771 if not isinstance(unknownopts, dict):
3761 if not isinstance(unknownopts, dict):
3772 raise error.ProgrammingError(
3762 raise error.ProgrammingError(
3773 b'filterknowncreateopts() did not return a dict'
3763 b'filterknowncreateopts() did not return a dict'
3774 )
3764 )
3775
3765
3776 if unknownopts:
3766 if unknownopts:
3777 raise error.Abort(
3767 raise error.Abort(
3778 _(
3768 _(
3779 b'unable to create repository because of unknown '
3769 b'unable to create repository because of unknown '
3780 b'creation option: %s'
3770 b'creation option: %s'
3781 )
3771 )
3782 % b', '.join(sorted(unknownopts)),
3772 % b', '.join(sorted(unknownopts)),
3783 hint=_(b'is a required extension not loaded?'),
3773 hint=_(b'is a required extension not loaded?'),
3784 )
3774 )
3785
3775
3786 requirements = newreporequirements(ui, createopts=createopts)
3776 requirements = newreporequirements(ui, createopts=createopts)
3787 requirements -= checkrequirementscompat(ui, requirements)
3777 requirements -= checkrequirementscompat(ui, requirements)
3788
3778
3789 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3779 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3790
3780
3791 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3781 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3792 if hgvfs.exists():
3782 if hgvfs.exists():
3793 raise error.RepoError(_(b'repository %s already exists') % path)
3783 raise error.RepoError(_(b'repository %s already exists') % path)
3794
3784
3795 if b'sharedrepo' in createopts:
3785 if b'sharedrepo' in createopts:
3796 sharedpath = createopts[b'sharedrepo'].sharedpath
3786 sharedpath = createopts[b'sharedrepo'].sharedpath
3797
3787
3798 if createopts.get(b'sharedrelative'):
3788 if createopts.get(b'sharedrelative'):
3799 try:
3789 try:
3800 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3790 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3801 sharedpath = util.pconvert(sharedpath)
3791 sharedpath = util.pconvert(sharedpath)
3802 except (IOError, ValueError) as e:
3792 except (IOError, ValueError) as e:
3803 # ValueError is raised on Windows if the drive letters differ
3793 # ValueError is raised on Windows if the drive letters differ
3804 # on each path.
3794 # on each path.
3805 raise error.Abort(
3795 raise error.Abort(
3806 _(b'cannot calculate relative path'),
3796 _(b'cannot calculate relative path'),
3807 hint=stringutil.forcebytestr(e),
3797 hint=stringutil.forcebytestr(e),
3808 )
3798 )
3809
3799
3810 if not wdirvfs.exists():
3800 if not wdirvfs.exists():
3811 wdirvfs.makedirs()
3801 wdirvfs.makedirs()
3812
3802
3813 hgvfs.makedir(notindexed=True)
3803 hgvfs.makedir(notindexed=True)
3814 if b'sharedrepo' not in createopts:
3804 if b'sharedrepo' not in createopts:
3815 hgvfs.mkdir(b'cache')
3805 hgvfs.mkdir(b'cache')
3816 hgvfs.mkdir(b'wcache')
3806 hgvfs.mkdir(b'wcache')
3817
3807
3818 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3808 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3819 if has_store and b'sharedrepo' not in createopts:
3809 if has_store and b'sharedrepo' not in createopts:
3820 hgvfs.mkdir(b'store')
3810 hgvfs.mkdir(b'store')
3821
3811
3822 # We create an invalid changelog outside the store so very old
3812 # We create an invalid changelog outside the store so very old
3823 # Mercurial versions (which didn't know about the requirements
3813 # Mercurial versions (which didn't know about the requirements
3824 # file) encounter an error on reading the changelog. This
3814 # file) encounter an error on reading the changelog. This
3825 # effectively locks out old clients and prevents them from
3815 # effectively locks out old clients and prevents them from
3826 # mucking with a repo in an unknown format.
3816 # mucking with a repo in an unknown format.
3827 #
3817 #
3828 # The revlog header has version 65535, which won't be recognized by
3818 # The revlog header has version 65535, which won't be recognized by
3829 # such old clients.
3819 # such old clients.
3830 hgvfs.append(
3820 hgvfs.append(
3831 b'00changelog.i',
3821 b'00changelog.i',
3832 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3822 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3833 b'layout',
3823 b'layout',
3834 )
3824 )
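The first four bytes written above double as a revlog index header whose low 16 bits are the version field, so old clients parse an impossible version 65535 and refuse the repository. A quick self-contained check of that trick:

import struct

header = b'\x00\x00\xff\xff'  # first four bytes of the dummy changelog
value = struct.unpack('>I', header)[0]
print(value & 0xFFFF)  # -> 65535, a revlog version no old client knows
print(value >> 16)     # -> 0, no feature flags set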
3835
3825
3836 # Filter the requirements into working copy and store ones
3826 # Filter the requirements into working copy and store ones
3837 wcreq, storereq = scmutil.filterrequirements(requirements)
3827 wcreq, storereq = scmutil.filterrequirements(requirements)
3838 # write working copy ones
3828 # write working copy ones
3839 scmutil.writerequires(hgvfs, wcreq)
3829 scmutil.writerequires(hgvfs, wcreq)
3840 # If there are store requirements and the current repository
3830 # If there are store requirements and the current repository
3841 # is not a shared one, write the store requirements.
3831 # is not a shared one, write the store requirements.
3842 # For a new shared repository, we don't need to write the store
3832 # For a new shared repository, we don't need to write the store
3843 # requirements as they are already present in the shared source's store
3833 # requirements as they are already present in the shared source's store
3844 if storereq and b'sharedrepo' not in createopts:
3834 if storereq and b'sharedrepo' not in createopts:
3845 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3835 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3846 scmutil.writerequires(storevfs, storereq)
3836 scmutil.writerequires(storevfs, storereq)
3847
3837
3848 # Write out file telling readers where to find the shared store.
3838 # Write out file telling readers where to find the shared store.
3849 if b'sharedrepo' in createopts:
3839 if b'sharedrepo' in createopts:
3850 hgvfs.write(b'sharedpath', sharedpath)
3840 hgvfs.write(b'sharedpath', sharedpath)
3851
3841
3852 if createopts.get(b'shareditems'):
3842 if createopts.get(b'shareditems'):
3853 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3843 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3854 hgvfs.write(b'shared', shared)
3844 hgvfs.write(b'shared', shared)
3855
3845
3856
3846
3857 def poisonrepository(repo):
3847 def poisonrepository(repo):
3858 """Poison a repository instance so it can no longer be used."""
3848 """Poison a repository instance so it can no longer be used."""
3859 # Perform any cleanup on the instance.
3849 # Perform any cleanup on the instance.
3860 repo.close()
3850 repo.close()
3861
3851
3862 # Our strategy is to replace the type of the object with one that
3852 # Our strategy is to replace the type of the object with one that
3863 # has all attribute lookups result in error.
3853 # has all attribute lookups result in error.
3864 #
3854 #
3865 # But we have to allow the close() method because some constructors
3855 # But we have to allow the close() method because some constructors
3866 # of repos call close() on repo references.
3856 # of repos call close() on repo references.
3867 class poisonedrepository(object):
3857 class poisonedrepository(object):
3868 def __getattribute__(self, item):
3858 def __getattribute__(self, item):
3869 if item == 'close':
3859 if item == 'close':
3870 return object.__getattribute__(self, item)
3860 return object.__getattribute__(self, item)
3871
3861
3872 raise error.ProgrammingError(
3862 raise error.ProgrammingError(
3873 b'repo instances should not be used after unshare'
3863 b'repo instances should not be used after unshare'
3874 )
3864 )
3875
3865
3876 def close(self):
3866 def close(self):
3877 pass
3867 pass
3878
3868
3879 # We may have a repoview, which intercepts __setattr__. So be sure
3869 # We may have a repoview, which intercepts __setattr__. So be sure
3880 # we operate at the lowest level possible.
3870 # we operate at the lowest level possible.
3881 object.__setattr__(repo, '__class__', poisonedrepository)
3871 object.__setattr__(repo, '__class__', poisonedrepository)
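A self-contained demonstration of the class-swap poisoning used above; assigning __class__ works here because both classes are plain object subclasses with compatible instance layouts:

class Poisoned(object):
    def __getattribute__(self, item):
        if item == 'close':
            return object.__getattribute__(self, item)
        raise RuntimeError('instance used after poisoning')

    def close(self):
        pass

class Thing(object):
    def __init__(self):
        self.value = 42

t = Thing()
object.__setattr__(t, '__class__', Poisoned)
t.close()              # still allowed, as in poisonrepository()
try:
    t.value
except RuntimeError as e:
    print(e)           # -> instance used after poisoning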
@@ -1,244 +1,243 b''
1 #testcases dirstate-v1 dirstate-v2
1 #testcases dirstate-v1 dirstate-v2
2
2
3 #if dirstate-v2
3 #if dirstate-v2
4 #require rust
5 $ echo '[format]' >> $HGRCPATH
4 $ echo '[format]' >> $HGRCPATH
6 $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
5 $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
7 #endif
6 #endif
8
7
9 $ hg init repo
8 $ hg init repo
10 $ cd repo
9 $ cd repo
11 $ echo a > a
10 $ echo a > a
12 $ hg add a
11 $ hg add a
13 $ hg commit -m test
12 $ hg commit -m test
14
13
15 Do we ever miss a sub-second change?
14 Do we ever miss a sub-second change?
16
15
17 $ for i in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20; do
16 $ for i in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20; do
18 > hg co -qC 0
17 > hg co -qC 0
19 > echo b > a
18 > echo b > a
20 > hg st
19 > hg st
21 > done
20 > done
22 M a
21 M a
23 M a
22 M a
24 M a
23 M a
25 M a
24 M a
26 M a
25 M a
27 M a
26 M a
28 M a
27 M a
29 M a
28 M a
30 M a
29 M a
31 M a
30 M a
32 M a
31 M a
33 M a
32 M a
34 M a
33 M a
35 M a
34 M a
36 M a
35 M a
37 M a
36 M a
38 M a
37 M a
39 M a
38 M a
40 M a
39 M a
41 M a
40 M a
42
41
43 $ echo test > b
42 $ echo test > b
44 $ mkdir dir1
43 $ mkdir dir1
45 $ echo test > dir1/c
44 $ echo test > dir1/c
46 $ echo test > d
45 $ echo test > d
47
46
48 $ echo test > e
47 $ echo test > e
49 #if execbit
48 #if execbit
50 A directory will typically have the execute bit -- make sure it doesn't get
49 A directory will typically have the execute bit -- make sure it doesn't get
51 confused with a file with the exec bit set
50 confused with a file with the exec bit set
52 $ chmod +x e
51 $ chmod +x e
53 #endif
52 #endif
54
53
55 $ hg add b dir1 d e
54 $ hg add b dir1 d e
56 adding dir1/c
55 adding dir1/c
57 $ hg commit -m test2
56 $ hg commit -m test2
58
57
59 $ cat >> $TESTTMP/dirstaterace.py << EOF
58 $ cat >> $TESTTMP/dirstaterace.py << EOF
60 > from mercurial import (
59 > from mercurial import (
61 > context,
60 > context,
62 > extensions,
61 > extensions,
63 > )
62 > )
64 > def extsetup(ui):
63 > def extsetup(ui):
65 > extensions.wrapfunction(context.workingctx, '_checklookup', overridechecklookup)
64 > extensions.wrapfunction(context.workingctx, '_checklookup', overridechecklookup)
66 > def overridechecklookup(orig, self, files):
65 > def overridechecklookup(orig, self, files):
67 > # make an update that changes the dirstate from underneath
66 > # make an update that changes the dirstate from underneath
68 > self._repo.ui.system(br"sh '$TESTTMP/dirstaterace.sh'",
67 > self._repo.ui.system(br"sh '$TESTTMP/dirstaterace.sh'",
69 > cwd=self._repo.root)
68 > cwd=self._repo.root)
70 > return orig(self, files)
69 > return orig(self, files)
71 > EOF
70 > EOF
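The helper extension above relies on extensions.wrapfunction to interpose on workingctx._checklookup so a shell script can mutate the working copy mid-status. Roughly, wrapfunction does the following (a simplified stand-in, not Mercurial's exact implementation):

def wrapfunction_like(container, name, wrapper):
    orig = getattr(container, name)

    def wrapped(*args, **kwargs):
        # the wrapper always receives the original callable first
        return wrapper(orig, *args, **kwargs)

    setattr(container, name, wrapped)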
72
71
73 $ hg debugrebuilddirstate
72 $ hg debugrebuilddirstate
74 $ hg debugdirstate
73 $ hg debugdirstate
75 n 0 -1 unset a
74 n 0 -1 unset a
76 n 0 -1 unset b
75 n 0 -1 unset b
77 n 0 -1 unset d
76 n 0 -1 unset d
78 n 0 -1 unset dir1/c
77 n 0 -1 unset dir1/c
79 n 0 -1 unset e
78 n 0 -1 unset e
80
79
81 XXX Note that this returns M for files that got replaced by directories. This is
80 XXX Note that this returns M for files that got replaced by directories. This is
82 definitely a bug, but the fix for that is hard and the next status run is fine
81 definitely a bug, but the fix for that is hard and the next status run is fine
83 anyway.
82 anyway.
84
83
85 $ cat > $TESTTMP/dirstaterace.sh <<EOF
84 $ cat > $TESTTMP/dirstaterace.sh <<EOF
86 > rm b && rm -r dir1 && rm d && mkdir d && rm e && mkdir e
85 > rm b && rm -r dir1 && rm d && mkdir d && rm e && mkdir e
87 > EOF
86 > EOF
88
87
89 $ hg status --config extensions.dirstaterace=$TESTTMP/dirstaterace.py
88 $ hg status --config extensions.dirstaterace=$TESTTMP/dirstaterace.py
90 M d
89 M d
91 M e
90 M e
92 ! b
91 ! b
93 ! dir1/c
92 ! dir1/c
94 $ hg debugdirstate
93 $ hg debugdirstate
95 n 644 2 * a (glob)
94 n 644 2 * a (glob)
96 n 0 -1 unset b
95 n 0 -1 unset b
97 n 0 -1 unset d
96 n 0 -1 unset d
98 n 0 -1 unset dir1/c
97 n 0 -1 unset dir1/c
99 n 0 -1 unset e
98 n 0 -1 unset e
100
99
101 $ hg status
100 $ hg status
102 ! b
101 ! b
103 ! d
102 ! d
104 ! dir1/c
103 ! dir1/c
105 ! e
104 ! e
106
105
107 $ rmdir d e
106 $ rmdir d e
108 $ hg update -C -q .
107 $ hg update -C -q .
109
108
110 Test that dirstate changes aren't written out at the end of "hg
109 Test that dirstate changes aren't written out at the end of "hg
111 status", if .hg/dirstate is already changed simultaneously before
110 status", if .hg/dirstate is already changed simultaneously before
112 acquisition of wlock in workingctx._poststatusfixup().
111 acquisition of wlock in workingctx._poststatusfixup().
113
112
114 This avoidance is important for keeping the dirstate consistent under
113 This avoidance is important for keeping the dirstate consistent under
115 race conditions (see issue5584 for details).
114 race conditions (see issue5584 for details).
116
115
117 $ hg parents -q
116 $ hg parents -q
118 1:* (glob)
117 1:* (glob)
119
118
120 $ hg debugrebuilddirstate
119 $ hg debugrebuilddirstate
121 $ hg debugdirstate
120 $ hg debugdirstate
122 n 0 -1 unset a
121 n 0 -1 unset a
123 n 0 -1 unset b
122 n 0 -1 unset b
124 n 0 -1 unset d
123 n 0 -1 unset d
125 n 0 -1 unset dir1/c
124 n 0 -1 unset dir1/c
126 n 0 -1 unset e
125 n 0 -1 unset e
127
126
128 $ cat > $TESTTMP/dirstaterace.sh <<EOF
127 $ cat > $TESTTMP/dirstaterace.sh <<EOF
129 > # This script assumes timetable of typical issue5584 case below:
128 > # This script assumes timetable of typical issue5584 case below:
130 > #
129 > #
131 > # 1. "hg status" loads .hg/dirstate
130 > # 1. "hg status" loads .hg/dirstate
132 > # 2. "hg status" confirms clean-ness of FILE
131 > # 2. "hg status" confirms clean-ness of FILE
133 > # 3. "hg update -C 0" updates the working directory simultaneously
132 > # 3. "hg update -C 0" updates the working directory simultaneously
134 > # (FILE is removed, and FILE is dropped from .hg/dirstate)
133 > # (FILE is removed, and FILE is dropped from .hg/dirstate)
135 > # 4. "hg status" acquires wlock
134 > # 4. "hg status" acquires wlock
136 > # (.hg/dirstate is re-loaded = no FILE entry in dirstate)
135 > # (.hg/dirstate is re-loaded = no FILE entry in dirstate)
137 > # 5. "hg status" marks FILE in dirstate as clean
136 > # 5. "hg status" marks FILE in dirstate as clean
138 > # (FILE entry is added to in-memory dirstate)
137 > # (FILE entry is added to in-memory dirstate)
139 > # 6. "hg status" writes dirstate changes into .hg/dirstate
138 > # 6. "hg status" writes dirstate changes into .hg/dirstate
140 > # (FILE entry is written into .hg/dirstate)
139 > # (FILE entry is written into .hg/dirstate)
141 > #
140 > #
142 > # To reproduce similar situation easily and certainly, #2 and #3
141 > # To reproduce similar situation easily and certainly, #2 and #3
143 > # are swapped. "hg cat" below ensures #2 on "hg status" side.
142 > # are swapped. "hg cat" below ensures #2 on "hg status" side.
144 >
143 >
145 > hg update -q -C 0
144 > hg update -q -C 0
146 > hg cat -r 1 b > b
145 > hg cat -r 1 b > b
147 > EOF
146 > EOF
148
147
149 "hg status" below should excludes "e", of which exec flag is set, for
148 "hg status" below should excludes "e", of which exec flag is set, for
150 portability of test scenario, because unsure but missing "e" is
149 portability of test scenario, because unsure but missing "e" is
151 treated differently in _checklookup() according to runtime platform.
150 treated differently in _checklookup() according to runtime platform.
152
151
153 - "missing(!)" on POSIX, "pctx[f].cmp(self[f])" raises ENOENT
152 - "missing(!)" on POSIX, "pctx[f].cmp(self[f])" raises ENOENT
154 - "modified(M)" on Windows, "self.flags(f) != pctx.flags(f)" is True
153 - "modified(M)" on Windows, "self.flags(f) != pctx.flags(f)" is True
155
154
156 $ hg status --config extensions.dirstaterace=$TESTTMP/dirstaterace.py --debug -X path:e
155 $ hg status --config extensions.dirstaterace=$TESTTMP/dirstaterace.py --debug -X path:e
157 skip updating dirstate: identity mismatch
156 skip updating dirstate: identity mismatch
158 M a
157 M a
159 ! d
158 ! d
160 ! dir1/c
159 ! dir1/c
161
160
162 $ hg parents -q
161 $ hg parents -q
163 0:* (glob)
162 0:* (glob)
164 $ hg files
163 $ hg files
165 a
164 a
166 $ hg debugdirstate
165 $ hg debugdirstate
167 n * * * a (glob)
166 n * * * a (glob)
168
167
169 $ rm b
168 $ rm b
170
169
171 #if fsmonitor
170 #if fsmonitor
172
171
173 Create fsmonitor state.
172 Create fsmonitor state.
174
173
175 $ hg status
174 $ hg status
176 $ f --type .hg/fsmonitor.state
175 $ f --type .hg/fsmonitor.state
177 .hg/fsmonitor.state: file
176 .hg/fsmonitor.state: file
178
177
179 Test that invalidating fsmonitor state in the middle (which doesn't require the
178 Test that invalidating fsmonitor state in the middle (which doesn't require the
180 wlock) causes the fsmonitor update to be skipped.
179 wlock) causes the fsmonitor update to be skipped.
181 hg debugrebuilddirstate ensures that the dirstaterace hook will be called, but
180 hg debugrebuilddirstate ensures that the dirstaterace hook will be called, but
182 it also invalidates the fsmonitor state. So back it up and restore it.
181 it also invalidates the fsmonitor state. So back it up and restore it.
183
182
184 $ mv .hg/fsmonitor.state .hg/fsmonitor.state.tmp
183 $ mv .hg/fsmonitor.state .hg/fsmonitor.state.tmp
185 $ hg debugrebuilddirstate
184 $ hg debugrebuilddirstate
186 $ mv .hg/fsmonitor.state.tmp .hg/fsmonitor.state
185 $ mv .hg/fsmonitor.state.tmp .hg/fsmonitor.state
187
186
188 $ cat > $TESTTMP/dirstaterace.sh <<EOF
187 $ cat > $TESTTMP/dirstaterace.sh <<EOF
189 > rm .hg/fsmonitor.state
188 > rm .hg/fsmonitor.state
190 > EOF
189 > EOF
191
190
192 $ hg status --config extensions.dirstaterace=$TESTTMP/dirstaterace.py --debug
191 $ hg status --config extensions.dirstaterace=$TESTTMP/dirstaterace.py --debug
193 skip updating fsmonitor.state: identity mismatch
192 skip updating fsmonitor.state: identity mismatch
194 $ f .hg/fsmonitor.state
193 $ f .hg/fsmonitor.state
195 .hg/fsmonitor.state: file not found
194 .hg/fsmonitor.state: file not found
196
195
197 #endif
196 #endif
198
197
199 Set up a rebase situation for issue5581.
198 Set up a rebase situation for issue5581.
200
199
201 $ echo c2 > a
200 $ echo c2 > a
202 $ echo c2 > b
201 $ echo c2 > b
203 $ hg add b
202 $ hg add b
204 $ hg commit -m c2
203 $ hg commit -m c2
205 created new head
204 created new head
206 $ echo c3 >> a
205 $ echo c3 >> a
207 $ hg commit -m c3
206 $ hg commit -m c3
208 $ hg update 2
207 $ hg update 2
209 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
208 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
210 $ echo c4 >> a
209 $ echo c4 >> a
211 $ echo c4 >> b
210 $ echo c4 >> b
212 $ hg commit -m c4
211 $ hg commit -m c4
213 created new head
212 created new head
214
213
215 Configure a merge tool that runs status in the middle of the rebase. The goal of
214 Configure a merge tool that runs status in the middle of the rebase. The goal of
216 the status call is to trigger a potential bug if fsmonitor's state is written
215 the status call is to trigger a potential bug if fsmonitor's state is written
217 even though the wlock is held by another process. The output of 'hg status' in
216 even though the wlock is held by another process. The output of 'hg status' in
218 the merge tool goes to /dev/null because we're more interested in the results of
217 the merge tool goes to /dev/null because we're more interested in the results of
219 'hg status' run after the rebase.
218 'hg status' run after the rebase.
220
219
221 $ cat >> $TESTTMP/mergetool-race.sh << EOF
220 $ cat >> $TESTTMP/mergetool-race.sh << EOF
222 > echo "custom merge tool"
221 > echo "custom merge tool"
223 > printf "c2\nc3\nc4\n" > \$1
222 > printf "c2\nc3\nc4\n" > \$1
224 > hg --cwd "$TESTTMP/repo" status > /dev/null
223 > hg --cwd "$TESTTMP/repo" status > /dev/null
225 > echo "custom merge tool end"
224 > echo "custom merge tool end"
226 > EOF
225 > EOF
227 $ cat >> $HGRCPATH << EOF
226 $ cat >> $HGRCPATH << EOF
228 > [extensions]
227 > [extensions]
229 > rebase =
228 > rebase =
230 > [merge-tools]
229 > [merge-tools]
231 > test.executable=sh
230 > test.executable=sh
232 > test.args=$TESTTMP/mergetool-race.sh \$output
231 > test.args=$TESTTMP/mergetool-race.sh \$output
233 > EOF
232 > EOF
234
233
235 $ hg rebase -s . -d 3 --tool test
234 $ hg rebase -s . -d 3 --tool test
236 rebasing 4:b08445fd6b2a tip "c4"
235 rebasing 4:b08445fd6b2a tip "c4"
237 merging a
236 merging a
238 custom merge tool
237 custom merge tool
239 custom merge tool end
238 custom merge tool end
240 saved backup bundle to $TESTTMP/repo/.hg/strip-backup/* (glob)
239 saved backup bundle to $TESTTMP/repo/.hg/strip-backup/* (glob)
241
240
242 This hg status should be empty, whether or not fsmonitor is enabled (issue5581).
241 This hg status should be empty, whether or not fsmonitor is enabled (issue5581).
243
242
244 $ hg status
243 $ hg status
@@ -1,45 +1,44 b''
1 #testcases dirstate-v1 dirstate-v2
1 #testcases dirstate-v1 dirstate-v2
2
2
3 #if dirstate-v2
3 #if dirstate-v2
4 #require rust
5 $ echo '[format]' >> $HGRCPATH
4 $ echo '[format]' >> $HGRCPATH
6 $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
5 $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
7 #endif
6 #endif
8
7
9 Checking the size/permissions/file-type of files stored in the
8 Checking the size/permissions/file-type of files stored in the
10 dirstate after an update where the files are changed concurrently
9 dirstate after an update where the files are changed concurrently
11 outside of hg's control.
10 outside of hg's control.
12
11
13 $ hg init repo
12 $ hg init repo
14 $ cd repo
13 $ cd repo
15 $ echo a > a
14 $ echo a > a
16 $ hg commit -qAm _
15 $ hg commit -qAm _
17 $ echo aa > a
16 $ echo aa > a
18 $ hg commit -m _
17 $ hg commit -m _
19
18
20 $ hg debugdirstate --no-dates
19 $ hg debugdirstate --no-dates
21 n 644 3 (set |unset) a (re)
20 n 644 3 (set |unset) a (re)
22
21
23 $ cat >> $TESTTMP/dirstaterace.py << EOF
22 $ cat >> $TESTTMP/dirstaterace.py << EOF
24 > from mercurial import (
23 > from mercurial import (
25 > extensions,
24 > extensions,
26 > merge,
25 > merge,
27 > )
26 > )
28 > def extsetup(ui):
27 > def extsetup(ui):
29 > extensions.wrapfunction(merge, 'applyupdates', wrap)
28 > extensions.wrapfunction(merge, 'applyupdates', wrap)
30 > def wrap(orig, *args, **kwargs):
29 > def wrap(orig, *args, **kwargs):
31 > res = orig(*args, **kwargs)
30 > res = orig(*args, **kwargs)
32 > with open("a", "w"):
31 > with open("a", "w"):
33 > pass # just truncate the file
32 > pass # just truncate the file
34 > return res
33 > return res
35 > EOF
34 > EOF
36
35
37 Do an update where file 'a' is changed between hg writing it to disk
36 Do an update where file 'a' is changed between hg writing it to disk
38 and hg writing the dirstate. The dirstate is correct nonetheless, and
37 and hg writing the dirstate. The dirstate is correct nonetheless, and
39 so hg status correctly shows a as clean.
38 so hg status correctly shows a as clean.
40
39
41 $ hg up -r 0 --config extensions.race=$TESTTMP/dirstaterace.py
40 $ hg up -r 0 --config extensions.race=$TESTTMP/dirstaterace.py
42 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
41 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
43 $ hg debugdirstate --no-dates
42 $ hg debugdirstate --no-dates
44 n 644 2 (set |unset) a (re)
43 n 644 2 (set |unset) a (re)
45 $ echo a > a; hg status; hg diff
44 $ echo a > a; hg status; hg diff
@@ -1,102 +1,101 b''
1 #testcases dirstate-v1 dirstate-v2
1 #testcases dirstate-v1 dirstate-v2
2
2
3 #if dirstate-v2
3 #if dirstate-v2
4 #require rust
5 $ echo '[format]' >> $HGRCPATH
4 $ echo '[format]' >> $HGRCPATH
6 $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
5 $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
7 #endif
6 #endif
8
7
9 ------ Test dirstate._dirs refcounting
8 ------ Test dirstate._dirs refcounting
10
9
11 $ hg init t
10 $ hg init t
12 $ cd t
11 $ cd t
13 $ mkdir -p a/b/c/d
12 $ mkdir -p a/b/c/d
14 $ touch a/b/c/d/x
13 $ touch a/b/c/d/x
15 $ touch a/b/c/d/y
14 $ touch a/b/c/d/y
16 $ touch a/b/c/d/z
15 $ touch a/b/c/d/z
17 $ hg ci -Am m
16 $ hg ci -Am m
18 adding a/b/c/d/x
17 adding a/b/c/d/x
19 adding a/b/c/d/y
18 adding a/b/c/d/y
20 adding a/b/c/d/z
19 adding a/b/c/d/z
21 $ hg mv a z
20 $ hg mv a z
22 moving a/b/c/d/x to z/b/c/d/x
21 moving a/b/c/d/x to z/b/c/d/x
23 moving a/b/c/d/y to z/b/c/d/y
22 moving a/b/c/d/y to z/b/c/d/y
24 moving a/b/c/d/z to z/b/c/d/z
23 moving a/b/c/d/z to z/b/c/d/z
25
24
26 Test name collisions
25 Test name collisions
27
26
28 $ rm z/b/c/d/x
27 $ rm z/b/c/d/x
29 $ mkdir z/b/c/d/x
28 $ mkdir z/b/c/d/x
30 $ touch z/b/c/d/x/y
29 $ touch z/b/c/d/x/y
31 $ hg add z/b/c/d/x/y
30 $ hg add z/b/c/d/x/y
32 abort: file 'z/b/c/d/x' in dirstate clashes with 'z/b/c/d/x/y'
31 abort: file 'z/b/c/d/x' in dirstate clashes with 'z/b/c/d/x/y'
33 [255]
32 [255]
34 $ rm -rf z/b/c/d
33 $ rm -rf z/b/c/d
35 $ touch z/b/c/d
34 $ touch z/b/c/d
36 $ hg add z/b/c/d
35 $ hg add z/b/c/d
37 abort: directory 'z/b/c/d' already in dirstate
36 abort: directory 'z/b/c/d' already in dirstate
38 [255]
37 [255]
39
38
40 $ cd ..
39 $ cd ..
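
A hedged sketch of the _dirs refcounting idea behind the two abort messages
above; the Dirs class is an illustrative stand-in, not the real helper:

    import collections

    class Dirs:
        def __init__(self):
            self._counts = collections.Counter()

        def addpath(self, path):
            # every tracked file bumps a counter on each ancestor directory
            while '/' in path:
                path = path.rsplit('/', 1)[0]
                self._counts[path] += 1

        def delpath(self, path):
            while '/' in path:
                path = path.rsplit('/', 1)[0]
                self._counts[path] -= 1
                if not self._counts[path]:
                    del self._counts[path]

        def __contains__(self, directory):
            return directory in self._counts

With such a map, adding z/b/c/d/x/y is rejected because z/b/c/d/x is already
a tracked file, and adding the file z/b/c/d is rejected because z/b/c/d is
still present as a directory.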
41
40
42 Issue1790: dirstate entry locked into unset if file mtime is set into
41 Issue1790: dirstate entry locked into unset if file mtime is set into
43 the future
42 the future
44
43
45 Prepare test repo:
44 Prepare test repo:
46
45
47 $ hg init u
46 $ hg init u
48 $ cd u
47 $ cd u
49 $ echo a > a
48 $ echo a > a
50 $ hg add
49 $ hg add
51 adding a
50 adding a
52 $ hg ci -m1
51 $ hg ci -m1
53
52
54 Set mtime of a into the future:
53 Set mtime of a into the future:
55
54
56 $ touch -t 203101011200 a
55 $ touch -t 203101011200 a
57
56
58 Status must not set a's entry to unset (issue1790):
57 Status must not set a's entry to unset (issue1790):
59
58
60 $ hg status
59 $ hg status
61 $ hg debugstate
60 $ hg debugstate
62 n 644 2 2031-01-01 12:00:00 a
61 n 644 2 2031-01-01 12:00:00 a
63
62
64 Test modulo storage/comparison of absurd dates:
63 Test modulo storage/comparison of absurd dates:
65
64
66 #if no-aix
65 #if no-aix
67 $ touch -t 195001011200 a
66 $ touch -t 195001011200 a
68 $ hg st
67 $ hg st
69 $ hg debugstate
68 $ hg debugstate
70 n 644 2 2018-01-19 15:14:08 a
69 n 644 2 2018-01-19 15:14:08 a
71 #endif
70 #endif
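
The 1950 -> 2018 translation above is expected: dirstate v1 stores mtimes
masked to 31 bits. A quick check (the mask mirrors the v1 rangemask; the
timestamps assume the UTC dates used by the test):

    import datetime

    _rangemask = 0x7FFFFFFF              # 2**31 - 1, the dirstate v1 mask
    mtime_1950 = -631108800              # 1950-01-01 12:00:00 UTC
    stored = mtime_1950 & _rangemask     # 1516374848
    print(datetime.datetime.fromtimestamp(stored, datetime.timezone.utc))
    # 2018-01-19 15:14:08+00:00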
72
71
73 Verify that exceptions during a dirstate change leave the dirstate
72 Verify that exceptions during a dirstate change leave the dirstate
74 coherent (issue4353)
73 coherent (issue4353)
75
74
76 $ cat > ../dirstateexception.py <<EOF
75 $ cat > ../dirstateexception.py <<EOF
77 > from __future__ import absolute_import
76 > from __future__ import absolute_import
78 > from mercurial import (
77 > from mercurial import (
79 > error,
78 > error,
80 > extensions,
79 > extensions,
81 > mergestate as mergestatemod,
80 > mergestate as mergestatemod,
82 > )
81 > )
83 >
82 >
84 > def wraprecordupdates(*args):
83 > def wraprecordupdates(*args):
85 > raise error.Abort(b"simulated error while recording dirstateupdates")
84 > raise error.Abort(b"simulated error while recording dirstateupdates")
86 >
85 >
87 > def reposetup(ui, repo):
86 > def reposetup(ui, repo):
88 > extensions.wrapfunction(mergestatemod, 'recordupdates',
87 > extensions.wrapfunction(mergestatemod, 'recordupdates',
89 > wraprecordupdates)
88 > wraprecordupdates)
90 > EOF
89 > EOF
91
90
92 $ hg rm a
91 $ hg rm a
93 $ hg commit -m 'rm a'
92 $ hg commit -m 'rm a'
94 $ echo "[extensions]" >> .hg/hgrc
93 $ echo "[extensions]" >> .hg/hgrc
95 $ echo "dirstateex=../dirstateexception.py" >> .hg/hgrc
94 $ echo "dirstateex=../dirstateexception.py" >> .hg/hgrc
96 $ hg up 0
95 $ hg up 0
97 abort: simulated error while recording dirstateupdates
96 abort: simulated error while recording dirstateupdates
98 [255]
97 [255]
99 $ hg log -r . -T '{rev}\n'
98 $ hg log -r . -T '{rev}\n'
100 1
99 1
101 $ hg status
100 $ hg status
102 ? a
101 ? a
@@ -1,417 +1,417 b''
1 #testcases dirstate-v1 dirstate-v2
1 #testcases dirstate-v1 dirstate-v2
2
2
3 #if dirstate-v2
3 #if dirstate-v2
4 #require rust
5 $ echo '[format]' >> $HGRCPATH
4 $ echo '[format]' >> $HGRCPATH
6 $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
5 $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
7 #endif
6 #endif
8
7
9 $ hg init ignorerepo
8 $ hg init ignorerepo
10 $ cd ignorerepo
9 $ cd ignorerepo
11
10
12 debugignore with no hgignore should be deterministic:
11 debugignore with no hgignore should be deterministic:
13 $ hg debugignore
12 $ hg debugignore
14 <nevermatcher>
13 <nevermatcher>
15
14
16 Issue562: .hgignore requires newline at end:
15 Issue562: .hgignore requires newline at end:
17
16
18 $ touch foo
17 $ touch foo
19 $ touch bar
18 $ touch bar
20 $ touch baz
19 $ touch baz
21 $ cat > makeignore.py <<EOF
20 $ cat > makeignore.py <<EOF
22 > f = open(".hgignore", "w")
21 > f = open(".hgignore", "w")
23 > f.write("ignore\n")
22 > f.write("ignore\n")
24 > f.write("foo\n")
23 > f.write("foo\n")
25 > # No EOL here
24 > # No EOL here
26 > f.write("bar")
25 > f.write("bar")
27 > f.close()
26 > f.close()
28 > EOF
27 > EOF
29
28
30 $ "$PYTHON" makeignore.py
29 $ "$PYTHON" makeignore.py
31
30
32 Should display baz only:
31 Should display baz only:
33
32
34 $ hg status
33 $ hg status
35 ? baz
34 ? baz
36
35
37 $ rm foo bar baz .hgignore makeignore.py
36 $ rm foo bar baz .hgignore makeignore.py
38
37
39 $ touch a.o
38 $ touch a.o
40 $ touch a.c
39 $ touch a.c
41 $ touch syntax
40 $ touch syntax
42 $ mkdir dir
41 $ mkdir dir
43 $ touch dir/a.o
42 $ touch dir/a.o
44 $ touch dir/b.o
43 $ touch dir/b.o
45 $ touch dir/c.o
44 $ touch dir/c.o
46
45
47 $ hg add dir/a.o
46 $ hg add dir/a.o
48 $ hg commit -m 0
47 $ hg commit -m 0
49 $ hg add dir/b.o
48 $ hg add dir/b.o
50
49
51 $ hg status
50 $ hg status
52 A dir/b.o
51 A dir/b.o
53 ? a.c
52 ? a.c
54 ? a.o
53 ? a.o
55 ? dir/c.o
54 ? dir/c.o
56 ? syntax
55 ? syntax
57
56
58 $ echo "*.o" > .hgignore
57 $ echo "*.o" > .hgignore
59 $ hg status
58 $ hg status
60 abort: $TESTTMP/ignorerepo/.hgignore: invalid pattern (relre): *.o (glob)
59 abort: $TESTTMP/ignorerepo/.hgignore: invalid pattern (relre): *.o (glob)
61 [255]
60 [255]
62
61
63 Ensure given files are relative to cwd
62 Ensure given files are relative to cwd
64
63
65 $ echo "dir/.*\.o" > .hgignore
64 $ echo "dir/.*\.o" > .hgignore
66 $ hg status -i
65 $ hg status -i
67 I dir/c.o
66 I dir/c.o
68
67
69 $ hg debugignore dir/c.o dir/missing.o
68 $ hg debugignore dir/c.o dir/missing.o
70 dir/c.o is ignored
69 dir/c.o is ignored
71 (ignore rule in $TESTTMP/ignorerepo/.hgignore, line 1: 'dir/.*\.o') (glob)
70 (ignore rule in $TESTTMP/ignorerepo/.hgignore, line 1: 'dir/.*\.o') (glob)
72 dir/missing.o is ignored
71 dir/missing.o is ignored
73 (ignore rule in $TESTTMP/ignorerepo/.hgignore, line 1: 'dir/.*\.o') (glob)
72 (ignore rule in $TESTTMP/ignorerepo/.hgignore, line 1: 'dir/.*\.o') (glob)
74 $ cd dir
73 $ cd dir
75 $ hg debugignore c.o missing.o
74 $ hg debugignore c.o missing.o
76 c.o is ignored
75 c.o is ignored
77 (ignore rule in $TESTTMP/ignorerepo/.hgignore, line 1: 'dir/.*\.o') (glob)
76 (ignore rule in $TESTTMP/ignorerepo/.hgignore, line 1: 'dir/.*\.o') (glob)
78 missing.o is ignored
77 missing.o is ignored
79 (ignore rule in $TESTTMP/ignorerepo/.hgignore, line 1: 'dir/.*\.o') (glob)
78 (ignore rule in $TESTTMP/ignorerepo/.hgignore, line 1: 'dir/.*\.o') (glob)
80
79
81 For icasefs, inexact matches also work, except for missing files
80 For icasefs, inexact matches also work, except for missing files
82
81
83 #if icasefs
82 #if icasefs
84 $ hg debugignore c.O missing.O
83 $ hg debugignore c.O missing.O
85 c.o is ignored
84 c.o is ignored
86 (ignore rule in $TESTTMP/ignorerepo/.hgignore, line 1: 'dir/.*\.o') (glob)
85 (ignore rule in $TESTTMP/ignorerepo/.hgignore, line 1: 'dir/.*\.o') (glob)
87 missing.O is not ignored
86 missing.O is not ignored
88 #endif
87 #endif
89
88
90 $ cd ..
89 $ cd ..
91
90
92 $ echo ".*\.o" > .hgignore
91 $ echo ".*\.o" > .hgignore
93 $ hg status
92 $ hg status
94 A dir/b.o
93 A dir/b.o
95 ? .hgignore
94 ? .hgignore
96 ? a.c
95 ? a.c
97 ? syntax
96 ? syntax
98
97
99 Ensure that comments work:
98 Ensure that comments work:
100
99
101 $ touch 'foo#bar' 'quux#' 'quu0#'
100 $ touch 'foo#bar' 'quux#' 'quu0#'
102 #if no-windows
101 #if no-windows
103 $ touch 'baz\' 'baz\wat' 'ba0\#wat' 'ba1\\' 'ba1\\wat' 'quu0\'
102 $ touch 'baz\' 'baz\wat' 'ba0\#wat' 'ba1\\' 'ba1\\wat' 'quu0\'
104 #endif
103 #endif
105
104
106 $ cat <<'EOF' >> .hgignore
105 $ cat <<'EOF' >> .hgignore
107 > # full-line comment
106 > # full-line comment
108 > # whitespace-only comment line
107 > # whitespace-only comment line
109 > syntax# pattern, no whitespace, then comment
108 > syntax# pattern, no whitespace, then comment
110 > a.c # pattern, then whitespace, then comment
109 > a.c # pattern, then whitespace, then comment
111 > baz\\# # (escaped) backslash, then comment
110 > baz\\# # (escaped) backslash, then comment
112 > ba0\\\#w # (escaped) backslash, escaped comment character, then comment
111 > ba0\\\#w # (escaped) backslash, escaped comment character, then comment
113 > ba1\\\\# # (escaped) backslashes, then comment
112 > ba1\\\\# # (escaped) backslashes, then comment
114 > foo\#b # escaped comment character
113 > foo\#b # escaped comment character
115 > quux\## escaped comment character at end of name
114 > quux\## escaped comment character at end of name
116 > EOF
115 > EOF
117 $ hg status
116 $ hg status
118 A dir/b.o
117 A dir/b.o
119 ? .hgignore
118 ? .hgignore
120 ? quu0#
119 ? quu0#
121 ? quu0\ (no-windows !)
120 ? quu0\ (no-windows !)
122
121
123 $ cat <<'EOF' > .hgignore
122 $ cat <<'EOF' > .hgignore
124 > .*\.o
123 > .*\.o
125 > syntax: glob
124 > syntax: glob
126 > syntax# pattern, no whitespace, then comment
125 > syntax# pattern, no whitespace, then comment
127 > a.c # pattern, then whitespace, then comment
126 > a.c # pattern, then whitespace, then comment
128 > baz\\#* # (escaped) backslash, then comment
127 > baz\\#* # (escaped) backslash, then comment
129 > ba0\\\#w* # (escaped) backslash, escaped comment character, then comment
128 > ba0\\\#w* # (escaped) backslash, escaped comment character, then comment
130 > ba1\\\\#* # (escaped) backslashes, then comment
129 > ba1\\\\#* # (escaped) backslashes, then comment
131 > foo\#b* # escaped comment character
130 > foo\#b* # escaped comment character
132 > quux\## escaped comment character at end of name
131 > quux\## escaped comment character at end of name
133 > quu0[\#]# escaped comment character inside [...]
132 > quu0[\#]# escaped comment character inside [...]
134 > EOF
133 > EOF
135 $ hg status
134 $ hg status
136 A dir/b.o
135 A dir/b.o
137 ? .hgignore
136 ? .hgignore
138 ? ba1\\wat (no-windows !)
137 ? ba1\\wat (no-windows !)
139 ? baz\wat (no-windows !)
138 ? baz\wat (no-windows !)
140 ? quu0\ (no-windows !)
139 ? quu0\ (no-windows !)
141
140
142 $ rm 'foo#bar' 'quux#' 'quu0#'
141 $ rm 'foo#bar' 'quux#' 'quu0#'
143 #if no-windows
142 #if no-windows
144 $ rm 'baz\' 'baz\wat' 'ba0\#wat' 'ba1\\' 'ba1\\wat' 'quu0\'
143 $ rm 'baz\' 'baz\wat' 'ba0\#wat' 'ba1\\' 'ba1\\wat' 'quu0\'
145 #endif
144 #endif
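
The escaping rules exercised above boil down to: a "#" preceded by an even
number of backslashes starts a comment, and any surviving "\#" unescapes to
a literal "#". A sketch of that rule, close to but not verbatim the real
parser:

    import re

    # a '#' prefixed by an even number of backslashes starts a comment
    _commentre = re.compile(r'((?:^|[^\\])(?:\\\\)*)#.*')

    def strip_comment(line):
        m = _commentre.search(line)
        if m:
            line = line[: m.end(1)]
        # unescape '\#' sequences that survived the comment strip
        return line.replace('\\#', '#')

    print(strip_comment(r'foo\#b # escaped comment character'))
    # 'foo#b ' (surrounding whitespace is trimmed separately)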
146
145
147 Check that '^\.' does not ignore the root directory:
146 Check that '^\.' does not ignore the root directory:
148
147
149 $ echo "^\." > .hgignore
148 $ echo "^\." > .hgignore
150 $ hg status
149 $ hg status
151 A dir/b.o
150 A dir/b.o
152 ? a.c
151 ? a.c
153 ? a.o
152 ? a.o
154 ? dir/c.o
153 ? dir/c.o
155 ? syntax
154 ? syntax
156
155
157 Test that patterns from ui.ignore options are read:
156 Test that patterns from ui.ignore options are read:
158
157
159 $ echo > .hgignore
158 $ echo > .hgignore
160 $ cat >> $HGRCPATH << EOF
159 $ cat >> $HGRCPATH << EOF
161 > [ui]
160 > [ui]
162 > ignore.other = $TESTTMP/ignorerepo/.hg/testhgignore
161 > ignore.other = $TESTTMP/ignorerepo/.hg/testhgignore
163 > EOF
162 > EOF
164 $ echo "glob:**.o" > .hg/testhgignore
163 $ echo "glob:**.o" > .hg/testhgignore
165 $ hg status
164 $ hg status
166 A dir/b.o
165 A dir/b.o
167 ? .hgignore
166 ? .hgignore
168 ? a.c
167 ? a.c
169 ? syntax
168 ? syntax
170
169
171 empty out testhgignore
170 empty out testhgignore
172 $ echo > .hg/testhgignore
171 $ echo > .hg/testhgignore
173
172
174 Test relative ignore path (issue4473):
173 Test relative ignore path (issue4473):
175
174
176 $ cat >> $HGRCPATH << EOF
175 $ cat >> $HGRCPATH << EOF
177 > [ui]
176 > [ui]
178 > ignore.relative = .hg/testhgignorerel
177 > ignore.relative = .hg/testhgignorerel
179 > EOF
178 > EOF
180 $ echo "glob:*.o" > .hg/testhgignorerel
179 $ echo "glob:*.o" > .hg/testhgignorerel
181 $ cd dir
180 $ cd dir
182 $ hg status
181 $ hg status
183 A dir/b.o
182 A dir/b.o
184 ? .hgignore
183 ? .hgignore
185 ? a.c
184 ? a.c
186 ? syntax
185 ? syntax
187 $ hg debugignore
186 $ hg debugignore
188 <includematcher includes='.*\\.o(?:/|$)'>
187 <includematcher includes='.*\\.o(?:/|$)'>
189
188
190 $ cd ..
189 $ cd ..
191 $ echo > .hg/testhgignorerel
190 $ echo > .hg/testhgignorerel
192 $ echo "syntax: glob" > .hgignore
191 $ echo "syntax: glob" > .hgignore
193 $ echo "re:.*\.o" >> .hgignore
192 $ echo "re:.*\.o" >> .hgignore
194 $ hg status
193 $ hg status
195 A dir/b.o
194 A dir/b.o
196 ? .hgignore
195 ? .hgignore
197 ? a.c
196 ? a.c
198 ? syntax
197 ? syntax
199
198
200 $ echo "syntax: invalid" > .hgignore
199 $ echo "syntax: invalid" > .hgignore
201 $ hg status
200 $ hg status
202 $TESTTMP/ignorerepo/.hgignore: ignoring invalid syntax 'invalid'
201 $TESTTMP/ignorerepo/.hgignore: ignoring invalid syntax 'invalid'
203 A dir/b.o
202 A dir/b.o
204 ? .hgignore
203 ? .hgignore
205 ? a.c
204 ? a.c
206 ? a.o
205 ? a.o
207 ? dir/c.o
206 ? dir/c.o
208 ? syntax
207 ? syntax
209
208
210 $ echo "syntax: glob" > .hgignore
209 $ echo "syntax: glob" > .hgignore
211 $ echo "*.o" >> .hgignore
210 $ echo "*.o" >> .hgignore
212 $ hg status
211 $ hg status
213 A dir/b.o
212 A dir/b.o
214 ? .hgignore
213 ? .hgignore
215 ? a.c
214 ? a.c
216 ? syntax
215 ? syntax
217
216
218 $ echo "relglob:syntax*" > .hgignore
217 $ echo "relglob:syntax*" > .hgignore
219 $ hg status
218 $ hg status
220 A dir/b.o
219 A dir/b.o
221 ? .hgignore
220 ? .hgignore
222 ? a.c
221 ? a.c
223 ? a.o
222 ? a.o
224 ? dir/c.o
223 ? dir/c.o
225
224
226 $ echo "relglob:*" > .hgignore
225 $ echo "relglob:*" > .hgignore
227 $ hg status
226 $ hg status
228 A dir/b.o
227 A dir/b.o
229
228
230 $ cd dir
229 $ cd dir
231 $ hg status .
230 $ hg status .
232 A b.o
231 A b.o
233
232
234 $ hg debugignore
233 $ hg debugignore
235 <includematcher includes='.*(?:/|$)'>
234 <includematcher includes='.*(?:/|$)'>
236
235
237 $ hg debugignore b.o
236 $ hg debugignore b.o
238 b.o is ignored
237 b.o is ignored
239 (ignore rule in $TESTTMP/ignorerepo/.hgignore, line 1: '*') (glob)
238 (ignore rule in $TESTTMP/ignorerepo/.hgignore, line 1: '*') (glob)
240
239
241 $ cd ..
240 $ cd ..
242
241
243 Check patterns that match only the directory
242 Check patterns that match only the directory
244
243
245 "(fsmonitor !)" below assumes that fsmonitor is enabled with
244 "(fsmonitor !)" below assumes that fsmonitor is enabled with
246 "walk_on_invalidate = false" (default), which doesn't involve
245 "walk_on_invalidate = false" (default), which doesn't involve
247 re-walking the whole repository when a .hgignore change is detected.
246 re-walking the whole repository when a .hgignore change is detected.
248
247
249 $ echo "^dir\$" > .hgignore
248 $ echo "^dir\$" > .hgignore
250 $ hg status
249 $ hg status
251 A dir/b.o
250 A dir/b.o
252 ? .hgignore
251 ? .hgignore
253 ? a.c
252 ? a.c
254 ? a.o
253 ? a.o
255 ? dir/c.o (fsmonitor !)
254 ? dir/c.o (fsmonitor !)
256 ? syntax
255 ? syntax
257
256
258 Check that a recursive glob pattern can match zero directories (dir/**/c.o matches dir/c.o)
257 Check that a recursive glob pattern can match zero directories (dir/**/c.o matches dir/c.o)
259
258
260 $ echo "syntax: glob" > .hgignore
259 $ echo "syntax: glob" > .hgignore
261 $ echo "dir/**/c.o" >> .hgignore
260 $ echo "dir/**/c.o" >> .hgignore
262 $ touch dir/c.o
261 $ touch dir/c.o
263 $ mkdir dir/subdir
262 $ mkdir dir/subdir
264 $ touch dir/subdir/c.o
263 $ touch dir/subdir/c.o
265 $ hg status
264 $ hg status
266 A dir/b.o
265 A dir/b.o
267 ? .hgignore
266 ? .hgignore
268 ? a.c
267 ? a.c
269 ? a.o
268 ? a.o
270 ? syntax
269 ? syntax
271 $ hg debugignore a.c
270 $ hg debugignore a.c
272 a.c is not ignored
271 a.c is not ignored
273 $ hg debugignore dir/c.o
272 $ hg debugignore dir/c.o
274 dir/c.o is ignored
273 dir/c.o is ignored
275 (ignore rule in $TESTTMP/ignorerepo/.hgignore, line 2: 'dir/**/c.o') (glob)
274 (ignore rule in $TESTTMP/ignorerepo/.hgignore, line 2: 'dir/**/c.o') (glob)
276
275
277 Check rooted globs
276 Check rooted globs
278
277
279 $ hg purge --all --config extensions.purge=
278 $ hg purge --all --config extensions.purge=
280 $ echo "syntax: rootglob" > .hgignore
279 $ echo "syntax: rootglob" > .hgignore
281 $ echo "a/*.ext" >> .hgignore
280 $ echo "a/*.ext" >> .hgignore
282 $ for p in a b/a aa; do mkdir -p $p; touch $p/b.ext; done
281 $ for p in a b/a aa; do mkdir -p $p; touch $p/b.ext; done
283 $ hg status -A 'set:**.ext'
282 $ hg status -A 'set:**.ext'
284 ? aa/b.ext
283 ? aa/b.ext
285 ? b/a/b.ext
284 ? b/a/b.ext
286 I a/b.ext
285 I a/b.ext
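
Why only a/b.ext is ignored: "rootglob:" anchors the pattern at the
repository root, so a/*.ext cannot float to b/a/b.ext or aa/b.ext the way a
relative glob could. A minimal, illustrative translation to a regex:

    import re

    def rootglob_to_re(pat):
        # illustrative: '*' -> '[^/]*', anchored at the repository root
        body = re.escape(pat).replace(r'\*', '[^/]*')
        return re.compile(r'\A' + body + r'\Z')

    rx = rootglob_to_re('a/*.ext')
    print(bool(rx.match('a/b.ext')))    # True  -> ignored
    print(bool(rx.match('b/a/b.ext')))  # False -> still unknown
    print(bool(rx.match('aa/b.ext')))   # False -> still unknown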
287
286
288 Check using 'include:' in ignore file
287 Check using 'include:' in ignore file
289
288
290 $ hg purge --all --config extensions.purge=
289 $ hg purge --all --config extensions.purge=
291 $ touch foo.included
290 $ touch foo.included
292
291
293 $ echo ".*.included" > otherignore
292 $ echo ".*.included" > otherignore
294 $ hg status -I "include:otherignore"
293 $ hg status -I "include:otherignore"
295 ? foo.included
294 ? foo.included
296
295
297 $ echo "include:otherignore" >> .hgignore
296 $ echo "include:otherignore" >> .hgignore
298 $ hg status
297 $ hg status
299 A dir/b.o
298 A dir/b.o
300 ? .hgignore
299 ? .hgignore
301 ? otherignore
300 ? otherignore
302
301
303 Check recursive uses of 'include:'
302 Check recursive uses of 'include:'
304
303
305 $ echo "include:nested/ignore" >> otherignore
304 $ echo "include:nested/ignore" >> otherignore
306 $ mkdir nested nested/more
305 $ mkdir nested nested/more
307 $ echo "glob:*ignore" > nested/ignore
306 $ echo "glob:*ignore" > nested/ignore
308 $ echo "rootglob:a" >> nested/ignore
307 $ echo "rootglob:a" >> nested/ignore
309 $ touch a nested/a nested/more/a
308 $ touch a nested/a nested/more/a
310 $ hg status
309 $ hg status
311 A dir/b.o
310 A dir/b.o
312 ? nested/a
311 ? nested/a
313 ? nested/more/a
312 ? nested/more/a
314 $ rm a nested/a nested/more/a
313 $ rm a nested/a nested/more/a
315
314
316 $ cp otherignore goodignore
315 $ cp otherignore goodignore
317 $ echo "include:badignore" >> otherignore
316 $ echo "include:badignore" >> otherignore
318 $ hg status
317 $ hg status
319 skipping unreadable pattern file 'badignore': $ENOENT$
318 skipping unreadable pattern file 'badignore': $ENOENT$
320 A dir/b.o
319 A dir/b.o
321
320
322 $ mv goodignore otherignore
321 $ mv goodignore otherignore
323
322
324 Check using 'include:' while in a non-root directory
323 Check using 'include:' while in a non-root directory
325
324
326 $ cd ..
325 $ cd ..
327 $ hg -R ignorerepo status
326 $ hg -R ignorerepo status
328 A dir/b.o
327 A dir/b.o
329 $ cd ignorerepo
328 $ cd ignorerepo
330
329
331 Check including subincludes
330 Check including subincludes
332
331
333 $ hg revert -q --all
332 $ hg revert -q --all
334 $ hg purge --all --config extensions.purge=
333 $ hg purge --all --config extensions.purge=
335 $ echo ".hgignore" > .hgignore
334 $ echo ".hgignore" > .hgignore
336 $ mkdir dir1 dir2
335 $ mkdir dir1 dir2
337 $ touch dir1/file1 dir1/file2 dir2/file1 dir2/file2
336 $ touch dir1/file1 dir1/file2 dir2/file1 dir2/file2
338 $ echo "subinclude:dir2/.hgignore" >> .hgignore
337 $ echo "subinclude:dir2/.hgignore" >> .hgignore
339 $ echo "glob:file*2" > dir2/.hgignore
338 $ echo "glob:file*2" > dir2/.hgignore
340 $ hg status
339 $ hg status
341 ? dir1/file1
340 ? dir1/file1
342 ? dir1/file2
341 ? dir1/file2
343 ? dir2/file1
342 ? dir2/file1
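
A hedged sketch of the scoping that "subinclude:" adds: patterns read from
dir2/.hgignore only ever apply beneath dir2/, which is why dir1/file2 is
still listed above. fnmatch stands in for the real glob engine:

    import fnmatch

    def subinclude_match(subdir, glob_pat, path):
        # patterns from <subdir>/.hgignore only see paths under <subdir>/
        prefix = subdir.rstrip('/') + '/'
        if not path.startswith(prefix):
            return False
        return fnmatch.fnmatch(path[len(prefix):], glob_pat)

    print(subinclude_match('dir2', 'file*2', 'dir2/file2'))  # True
    print(subinclude_match('dir2', 'file*2', 'dir1/file2'))  # False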
344
343
345 Check including subincludes with other patterns
344 Check including subincludes with other patterns
346
345
347 $ echo "subinclude:dir1/.hgignore" >> .hgignore
346 $ echo "subinclude:dir1/.hgignore" >> .hgignore
348
347
349 $ mkdir dir1/subdir
348 $ mkdir dir1/subdir
350 $ touch dir1/subdir/file1
349 $ touch dir1/subdir/file1
351 $ echo "rootglob:f?le1" > dir1/.hgignore
350 $ echo "rootglob:f?le1" > dir1/.hgignore
352 $ hg status
351 $ hg status
353 ? dir1/file2
352 ? dir1/file2
354 ? dir1/subdir/file1
353 ? dir1/subdir/file1
355 ? dir2/file1
354 ? dir2/file1
356 $ rm dir1/subdir/file1
355 $ rm dir1/subdir/file1
357
356
358 $ echo "regexp:f.le1" > dir1/.hgignore
357 $ echo "regexp:f.le1" > dir1/.hgignore
359 $ hg status
358 $ hg status
360 ? dir1/file2
359 ? dir1/file2
361 ? dir2/file1
360 ? dir2/file1
362
361
363 Check multiple levels of sub-ignores
362 Check multiple levels of sub-ignores
364
363
365 $ touch dir1/subdir/subfile1 dir1/subdir/subfile3 dir1/subdir/subfile4
364 $ touch dir1/subdir/subfile1 dir1/subdir/subfile3 dir1/subdir/subfile4
366 $ echo "subinclude:subdir/.hgignore" >> dir1/.hgignore
365 $ echo "subinclude:subdir/.hgignore" >> dir1/.hgignore
367 $ echo "glob:subfil*3" >> dir1/subdir/.hgignore
366 $ echo "glob:subfil*3" >> dir1/subdir/.hgignore
368
367
369 $ hg status
368 $ hg status
370 ? dir1/file2
369 ? dir1/file2
371 ? dir1/subdir/subfile4
370 ? dir1/subdir/subfile4
372 ? dir2/file1
371 ? dir2/file1
373
372
374 Check include subignore at the same level
373 Check include subignore at the same level
375
374
376 $ mv dir1/subdir/.hgignore dir1/.hgignoretwo
375 $ mv dir1/subdir/.hgignore dir1/.hgignoretwo
377 $ echo "regexp:f.le1" > dir1/.hgignore
376 $ echo "regexp:f.le1" > dir1/.hgignore
378 $ echo "subinclude:.hgignoretwo" >> dir1/.hgignore
377 $ echo "subinclude:.hgignoretwo" >> dir1/.hgignore
379 $ echo "glob:file*2" > dir1/.hgignoretwo
378 $ echo "glob:file*2" > dir1/.hgignoretwo
380
379
381 $ hg status | grep file2
380 $ hg status | grep file2
382 [1]
381 [1]
383 $ hg debugignore dir1/file2
382 $ hg debugignore dir1/file2
384 dir1/file2 is ignored
383 dir1/file2 is ignored
385 (ignore rule in dir2/.hgignore, line 1: 'file*2')
384 (ignore rule in dir2/.hgignore, line 1: 'file*2')
386
385
387 #if windows
386 #if windows
388
387
389 Windows paths are accepted on input
388 Windows paths are accepted on input
390
389
391 $ rm dir1/.hgignore
390 $ rm dir1/.hgignore
392 $ echo "dir1/file*" >> .hgignore
391 $ echo "dir1/file*" >> .hgignore
393 $ hg debugignore "dir1\file2"
392 $ hg debugignore "dir1\file2"
394 dir1/file2 is ignored
393 dir1/file2 is ignored
395 (ignore rule in $TESTTMP\ignorerepo\.hgignore, line 4: 'dir1/file*')
394 (ignore rule in $TESTTMP\ignorerepo\.hgignore, line 4: 'dir1/file*')
396 $ hg up -qC .
395 $ hg up -qC .
397
396
398 #endif
397 #endif
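
The Windows-only check above relies on input normalization: backslash
separators in user-supplied paths are converted to the slash form used
internally before matching. A one-line illustrative equivalent:

    def to_internal_path(path):
        # illustrative: paths are stored and matched with '/' separators
        return path.replace('\\', '/')

    print(to_internal_path('dir1\\file2'))  # dir1/file2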
399
398
400 #if dirstate-v2
399 #if dirstate-v2 rust
401
400
402 Check the hash of ignore patterns written in the dirstate
401 Check the hash of ignore patterns written in the dirstate
402 This is an optimization that is only relevant when using the Rust extensions
403
403
404 $ hg status > /dev/null
404 $ hg status > /dev/null
405 $ cat .hg/testhgignore .hg/testhgignorerel .hgignore dir2/.hgignore dir1/.hgignore dir1/.hgignoretwo | $TESTDIR/f --sha1
405 $ cat .hg/testhgignore .hg/testhgignorerel .hgignore dir2/.hgignore dir1/.hgignore dir1/.hgignoretwo | $TESTDIR/f --sha1
406 sha1=6e315b60f15fb5dfa02be00f3e2c8f923051f5ff
406 sha1=6e315b60f15fb5dfa02be00f3e2c8f923051f5ff
407 $ hg debugdirstateignorepatternshash
407 $ hg debugdirstateignorepatternshash
408 6e315b60f15fb5dfa02be00f3e2c8f923051f5ff
408 6e315b60f15fb5dfa02be00f3e2c8f923051f5ff
409
409
410 $ echo rel > .hg/testhgignorerel
410 $ echo rel > .hg/testhgignorerel
411 $ hg status > /dev/null
411 $ hg status > /dev/null
412 $ cat .hg/testhgignore .hg/testhgignorerel .hgignore dir2/.hgignore dir1/.hgignore dir1/.hgignoretwo | $TESTDIR/f --sha1
412 $ cat .hg/testhgignore .hg/testhgignorerel .hgignore dir2/.hgignore dir1/.hgignore dir1/.hgignoretwo | $TESTDIR/f --sha1
413 sha1=dea19cc7119213f24b6b582a4bae7b0cb063e34e
413 sha1=dea19cc7119213f24b6b582a4bae7b0cb063e34e
414 $ hg debugdirstateignorepatternshash
414 $ hg debugdirstateignorepatternshash
415 dea19cc7119213f24b6b582a4bae7b0cb063e34e
415 dea19cc7119213f24b6b582a4bae7b0cb063e34e
416
416
417 #endif
417 #endif
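
A hedged sketch of the optimization checked above: hash every ignore
pattern file in a stable order and store the digest in the dirstate-v2
file, so a later status run can tell at a glance whether cached ignore
decisions are still valid. This mirrors the `cat ... | f --sha1` pipeline:

    import hashlib

    def ignore_patterns_hash(paths):
        h = hashlib.sha1()
        for p in paths:  # stable, repo-defined order
            with open(p, 'rb') as f:
                h.update(f.read())
        return h.hexdigest()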
@@ -1,87 +1,86 b''
1 #require unix-permissions no-root reporevlogstore
1 #require unix-permissions no-root reporevlogstore
2
2
3 #testcases dirstate-v1 dirstate-v2
3 #testcases dirstate-v1 dirstate-v2
4
4
5 #if dirstate-v2
5 #if dirstate-v2
6 #require rust
7 $ echo '[format]' >> $HGRCPATH
6 $ echo '[format]' >> $HGRCPATH
8 $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
7 $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
9 #endif
8 #endif
10
9
11 $ hg init t
10 $ hg init t
12 $ cd t
11 $ cd t
13
12
14 $ echo foo > a
13 $ echo foo > a
15 $ hg add a
14 $ hg add a
16
15
17 $ hg commit -m "1"
16 $ hg commit -m "1"
18
17
19 $ hg verify
18 $ hg verify
20 checking changesets
19 checking changesets
21 checking manifests
20 checking manifests
22 crosschecking files in changesets and manifests
21 crosschecking files in changesets and manifests
23 checking files
22 checking files
24 checked 1 changesets with 1 changes to 1 files
23 checked 1 changesets with 1 changes to 1 files
25
24
26 $ chmod -r .hg/store/data/a.i
25 $ chmod -r .hg/store/data/a.i
27
26
28 $ hg verify
27 $ hg verify
29 checking changesets
28 checking changesets
30 checking manifests
29 checking manifests
31 crosschecking files in changesets and manifests
30 crosschecking files in changesets and manifests
32 checking files
31 checking files
33 abort: Permission denied: '$TESTTMP/t/.hg/store/data/a.i'
32 abort: Permission denied: '$TESTTMP/t/.hg/store/data/a.i'
34 [255]
33 [255]
35
34
36 $ chmod +r .hg/store/data/a.i
35 $ chmod +r .hg/store/data/a.i
37
36
38 $ hg verify
37 $ hg verify
39 checking changesets
38 checking changesets
40 checking manifests
39 checking manifests
41 crosschecking files in changesets and manifests
40 crosschecking files in changesets and manifests
42 checking files
41 checking files
43 checked 1 changesets with 1 changes to 1 files
42 checked 1 changesets with 1 changes to 1 files
44
43
45 $ chmod -w .hg/store/data/a.i
44 $ chmod -w .hg/store/data/a.i
46
45
47 $ echo barber > a
46 $ echo barber > a
48 $ hg commit -m "2"
47 $ hg commit -m "2"
49 trouble committing a!
48 trouble committing a!
50 abort: Permission denied: '$TESTTMP/t/.hg/store/data/a.i'
49 abort: Permission denied: '$TESTTMP/t/.hg/store/data/a.i'
51 [255]
50 [255]
52
51
53 $ chmod -w .
52 $ chmod -w .
54
53
55 $ hg diff --nodates
54 $ hg diff --nodates
56 diff -r 2a18120dc1c9 a
55 diff -r 2a18120dc1c9 a
57 --- a/a
56 --- a/a
58 +++ b/a
57 +++ b/a
59 @@ -1,1 +1,1 @@
58 @@ -1,1 +1,1 @@
60 -foo
59 -foo
61 +barber
60 +barber
62
61
63 $ chmod +w .
62 $ chmod +w .
64
63
65 $ chmod +w .hg/store/data/a.i
64 $ chmod +w .hg/store/data/a.i
66 $ mkdir dir
65 $ mkdir dir
67 $ touch dir/a
66 $ touch dir/a
68 $ hg status
67 $ hg status
69 M a
68 M a
70 ? dir/a
69 ? dir/a
71 $ chmod -rx dir
70 $ chmod -rx dir
72
71
73 #if no-fsmonitor
72 #if no-fsmonitor
74
73
75 (fsmonitor makes "hg status" avoid accessing "dir")
74 (fsmonitor makes "hg status" avoid accessing "dir")
76
75
77 $ hg status
76 $ hg status
78 dir: Permission denied
77 dir: Permission denied
79 M a
78 M a
80
79
81 #endif
80 #endif
82
81
83 Reenable perm to allow deletion:
82 Reenable perm to allow deletion:
84
83
85 $ chmod +rx dir
84 $ chmod +rx dir
86
85
87 $ cd ..
86 $ cd ..
@@ -1,350 +1,349 b''
1 #testcases dirstate-v1 dirstate-v2
1 #testcases dirstate-v1 dirstate-v2
2
2
3 #if dirstate-v2
3 #if dirstate-v2
4 #require rust
5 $ echo '[format]' >> $HGRCPATH
4 $ echo '[format]' >> $HGRCPATH
6 $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
5 $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
7 #endif
6 #endif
8
7
9 init
8 init
10
9
11 $ hg init t
10 $ hg init t
12 $ cd t
11 $ cd t
13
12
14 setup
13 setup
15
14
16 $ echo r1 > r1
15 $ echo r1 > r1
17 $ hg ci -qAmr1 -d'0 0'
16 $ hg ci -qAmr1 -d'0 0'
18 $ mkdir directory
17 $ mkdir directory
19 $ echo r2 > directory/r2
18 $ echo r2 > directory/r2
20 $ hg ci -qAmr2 -d'1 0'
19 $ hg ci -qAmr2 -d'1 0'
21 $ echo 'ignored' > .hgignore
20 $ echo 'ignored' > .hgignore
22 $ hg ci -qAmr3 -d'2 0'
21 $ hg ci -qAmr3 -d'2 0'
23
22
24 purge without the extension
23 purge without the extension
25
24
26 $ hg st
25 $ hg st
27 $ touch foo
26 $ touch foo
28 $ hg purge
27 $ hg purge
29 permanently delete 1 unkown files? (yN) n
28 permanently delete 1 unkown files? (yN) n
30 abort: removal cancelled
29 abort: removal cancelled
31 [250]
30 [250]
32 $ hg st
31 $ hg st
33 ? foo
32 ? foo
34 $ hg purge --no-confirm
33 $ hg purge --no-confirm
35 $ hg st
34 $ hg st
36
35
37 now enabling the extension
36 now enabling the extension
38
37
39 $ cat <<EOF >> $HGRCPATH
38 $ cat <<EOF >> $HGRCPATH
40 > [extensions]
39 > [extensions]
41 > purge =
40 > purge =
42 > EOF
41 > EOF
43
42
44 delete an empty directory
43 delete an empty directory
45
44
46 $ mkdir empty_dir
45 $ mkdir empty_dir
47 $ hg purge -p -v
46 $ hg purge -p -v
48 empty_dir
47 empty_dir
49 $ hg purge --confirm
48 $ hg purge --confirm
50 permanently delete at least 1 empty directories? (yN) n
49 permanently delete at least 1 empty directories? (yN) n
51 abort: removal cancelled
50 abort: removal cancelled
52 [250]
51 [250]
53 $ hg purge -v
52 $ hg purge -v
54 removing directory empty_dir
53 removing directory empty_dir
55 $ ls -A
54 $ ls -A
56 .hg
55 .hg
57 .hgignore
56 .hgignore
58 directory
57 directory
59 r1
58 r1
60
59
61 delete an untracked directory
60 delete an untracked directory
62
61
63 $ mkdir untracked_dir
62 $ mkdir untracked_dir
64 $ touch untracked_dir/untracked_file1
63 $ touch untracked_dir/untracked_file1
65 $ touch untracked_dir/untracked_file2
64 $ touch untracked_dir/untracked_file2
66 $ hg purge -p
65 $ hg purge -p
67 untracked_dir/untracked_file1
66 untracked_dir/untracked_file1
68 untracked_dir/untracked_file2
67 untracked_dir/untracked_file2
69 $ hg purge -v
68 $ hg purge -v
70 removing file untracked_dir/untracked_file1
69 removing file untracked_dir/untracked_file1
71 removing file untracked_dir/untracked_file2
70 removing file untracked_dir/untracked_file2
72 removing directory untracked_dir
71 removing directory untracked_dir
73 $ ls -A
72 $ ls -A
74 .hg
73 .hg
75 .hgignore
74 .hgignore
76 directory
75 directory
77 r1
76 r1
78
77
79 delete an untracked file
78 delete an untracked file
80
79
81 $ touch untracked_file
80 $ touch untracked_file
82 $ touch untracked_file_readonly
81 $ touch untracked_file_readonly
83 $ "$PYTHON" <<EOF
82 $ "$PYTHON" <<EOF
84 > import os
83 > import os
85 > import stat
84 > import stat
86 > f = 'untracked_file_readonly'
85 > f = 'untracked_file_readonly'
87 > os.chmod(f, stat.S_IMODE(os.stat(f).st_mode) & ~stat.S_IWRITE)
86 > os.chmod(f, stat.S_IMODE(os.stat(f).st_mode) & ~stat.S_IWRITE)
88 > EOF
87 > EOF
89 $ hg purge -p
88 $ hg purge -p
90 untracked_file
89 untracked_file
91 untracked_file_readonly
90 untracked_file_readonly
92 $ hg purge --confirm
91 $ hg purge --confirm
93 permanently delete 2 unkown files? (yN) n
92 permanently delete 2 unkown files? (yN) n
94 abort: removal cancelled
93 abort: removal cancelled
95 [250]
94 [250]
96 $ hg purge -v
95 $ hg purge -v
97 removing file untracked_file
96 removing file untracked_file
98 removing file untracked_file_readonly
97 removing file untracked_file_readonly
99 $ ls -A
98 $ ls -A
100 .hg
99 .hg
101 .hgignore
100 .hgignore
102 directory
101 directory
103 r1
102 r1
104
103
105 delete an untracked file in a tracked directory
104 delete an untracked file in a tracked directory
106
105
107 $ touch directory/untracked_file
106 $ touch directory/untracked_file
108 $ hg purge -p
107 $ hg purge -p
109 directory/untracked_file
108 directory/untracked_file
110 $ hg purge -v
109 $ hg purge -v
111 removing file directory/untracked_file
110 removing file directory/untracked_file
112 $ ls -A
111 $ ls -A
113 .hg
112 .hg
114 .hgignore
113 .hgignore
115 directory
114 directory
116 r1
115 r1
117
116
118 delete nested directories
117 delete nested directories
119
118
120 $ mkdir -p untracked_directory/nested_directory
119 $ mkdir -p untracked_directory/nested_directory
121 $ hg purge -p
120 $ hg purge -p
122 untracked_directory/nested_directory
121 untracked_directory/nested_directory
123 $ hg purge -v
122 $ hg purge -v
124 removing directory untracked_directory/nested_directory
123 removing directory untracked_directory/nested_directory
125 removing directory untracked_directory
124 removing directory untracked_directory
126 $ ls -A
125 $ ls -A
127 .hg
126 .hg
128 .hgignore
127 .hgignore
129 directory
128 directory
130 r1
129 r1
131
130
132 delete nested directories from a subdir
131 delete nested directories from a subdir
133
132
134 $ mkdir -p untracked_directory/nested_directory
133 $ mkdir -p untracked_directory/nested_directory
135 $ cd directory
134 $ cd directory
136 $ hg purge -p
135 $ hg purge -p
137 untracked_directory/nested_directory
136 untracked_directory/nested_directory
138 $ hg purge -v
137 $ hg purge -v
139 removing directory untracked_directory/nested_directory
138 removing directory untracked_directory/nested_directory
140 removing directory untracked_directory
139 removing directory untracked_directory
141 $ cd ..
140 $ cd ..
142 $ ls -A
141 $ ls -A
143 .hg
142 .hg
144 .hgignore
143 .hgignore
145 directory
144 directory
146 r1
145 r1
147
146
148 delete only part of the tree
147 delete only part of the tree
149
148
150 $ mkdir -p untracked_directory/nested_directory
149 $ mkdir -p untracked_directory/nested_directory
151 $ touch directory/untracked_file
150 $ touch directory/untracked_file
152 $ cd directory
151 $ cd directory
153 $ hg purge -p ../untracked_directory
152 $ hg purge -p ../untracked_directory
154 untracked_directory/nested_directory
153 untracked_directory/nested_directory
155 $ hg purge --confirm
154 $ hg purge --confirm
156 permanently delete 1 unkown files? (yN) n
155 permanently delete 1 unkown files? (yN) n
157 abort: removal cancelled
156 abort: removal cancelled
158 [250]
157 [250]
159 $ hg purge -v ../untracked_directory
158 $ hg purge -v ../untracked_directory
160 removing directory untracked_directory/nested_directory
159 removing directory untracked_directory/nested_directory
161 removing directory untracked_directory
160 removing directory untracked_directory
162 $ cd ..
161 $ cd ..
163 $ ls -A
162 $ ls -A
164 .hg
163 .hg
165 .hgignore
164 .hgignore
166 directory
165 directory
167 r1
166 r1
168 $ ls directory/untracked_file
167 $ ls directory/untracked_file
169 directory/untracked_file
168 directory/untracked_file
170 $ rm directory/untracked_file
169 $ rm directory/untracked_file
171
170
172 skip ignored files if -i or --all is not specified
171 skip ignored files if -i or --all is not specified
173
172
174 $ touch ignored
173 $ touch ignored
175 $ hg purge -p
174 $ hg purge -p
176 $ hg purge --confirm
175 $ hg purge --confirm
177 $ hg purge -v
176 $ hg purge -v
178 $ touch untracked_file
177 $ touch untracked_file
179 $ ls
178 $ ls
180 directory
179 directory
181 ignored
180 ignored
182 r1
181 r1
183 untracked_file
182 untracked_file
184 $ hg purge -p -i
183 $ hg purge -p -i
185 ignored
184 ignored
186 $ hg purge --confirm -i
185 $ hg purge --confirm -i
187 permanently delete 1 ignored files? (yN) n
186 permanently delete 1 ignored files? (yN) n
188 abort: removal cancelled
187 abort: removal cancelled
189 [250]
188 [250]
190 $ hg purge -v -i
189 $ hg purge -v -i
191 removing file ignored
190 removing file ignored
192 $ ls -A
191 $ ls -A
193 .hg
192 .hg
194 .hgignore
193 .hgignore
195 directory
194 directory
196 r1
195 r1
197 untracked_file
196 untracked_file
198 $ touch ignored
197 $ touch ignored
199 $ hg purge -p --all
198 $ hg purge -p --all
200 ignored
199 ignored
201 untracked_file
200 untracked_file
202 $ hg purge --confirm --all
201 $ hg purge --confirm --all
203 permanently delete 1 unkown and 1 ignored files? (yN) n
202 permanently delete 1 unkown and 1 ignored files? (yN) n
204 abort: removal cancelled
203 abort: removal cancelled
205 [250]
204 [250]
206 $ hg purge -v --all
205 $ hg purge -v --all
207 removing file ignored
206 removing file ignored
208 removing file untracked_file
207 removing file untracked_file
209 $ ls
208 $ ls
210 directory
209 directory
211 r1
210 r1
212
211
213 abort with missing files until we support name-mangling filesystems
212 abort with missing files until we support name-mangling filesystems
214
213
215 $ touch untracked_file
214 $ touch untracked_file
216 $ rm r1
215 $ rm r1
217
216
218 hide error messages to avoid changing the output when the text changes
217 hide error messages to avoid changing the output when the text changes
219
218
220 $ hg purge -p 2> /dev/null
219 $ hg purge -p 2> /dev/null
221 untracked_file
220 untracked_file
222 $ hg st
221 $ hg st
223 ! r1
222 ! r1
224 ? untracked_file
223 ? untracked_file
225
224
226 $ hg purge -p
225 $ hg purge -p
227 untracked_file
226 untracked_file
228 $ hg purge -v 2> /dev/null
227 $ hg purge -v 2> /dev/null
229 removing file untracked_file
228 removing file untracked_file
230 $ hg st
229 $ hg st
231 ! r1
230 ! r1
232
231
233 $ hg purge -v
232 $ hg purge -v
234 $ hg revert --all --quiet
233 $ hg revert --all --quiet
235 $ hg st -a
234 $ hg st -a
236
235
237 tracked file in ignored directory (issue621)
236 tracked file in ignored directory (issue621)
238
237
239 $ echo directory >> .hgignore
238 $ echo directory >> .hgignore
240 $ hg ci -m 'ignore directory'
239 $ hg ci -m 'ignore directory'
241 $ touch untracked_file
240 $ touch untracked_file
242 $ hg purge -p
241 $ hg purge -p
243 untracked_file
242 untracked_file
244 $ hg purge -v
243 $ hg purge -v
245 removing file untracked_file
244 removing file untracked_file
246
245
247 skip excluded files
246 skip excluded files
248
247
249 $ touch excluded_file
248 $ touch excluded_file
250 $ hg purge -p -X excluded_file
249 $ hg purge -p -X excluded_file
251 $ hg purge -v -X excluded_file
250 $ hg purge -v -X excluded_file
252 $ ls -A
251 $ ls -A
253 .hg
252 .hg
254 .hgignore
253 .hgignore
255 directory
254 directory
256 excluded_file
255 excluded_file
257 r1
256 r1
258 $ rm excluded_file
257 $ rm excluded_file
259
258
260 skip files in excluded dirs
259 skip files in excluded dirs
261
260
262 $ mkdir excluded_dir
261 $ mkdir excluded_dir
263 $ touch excluded_dir/file
262 $ touch excluded_dir/file
264 $ hg purge -p -X excluded_dir
263 $ hg purge -p -X excluded_dir
265 $ hg purge -v -X excluded_dir
264 $ hg purge -v -X excluded_dir
266 $ ls -A
265 $ ls -A
267 .hg
266 .hg
268 .hgignore
267 .hgignore
269 directory
268 directory
270 excluded_dir
269 excluded_dir
271 r1
270 r1
272 $ ls excluded_dir
271 $ ls excluded_dir
273 file
272 file
274 $ rm -R excluded_dir
273 $ rm -R excluded_dir
275
274
276 skip excluded empty dirs
275 skip excluded empty dirs
277
276
278 $ mkdir excluded_dir
277 $ mkdir excluded_dir
279 $ hg purge -p -X excluded_dir
278 $ hg purge -p -X excluded_dir
280 $ hg purge -v -X excluded_dir
279 $ hg purge -v -X excluded_dir
281 $ ls -A
280 $ ls -A
282 .hg
281 .hg
283 .hgignore
282 .hgignore
284 directory
283 directory
285 excluded_dir
284 excluded_dir
286 r1
285 r1
287 $ rmdir excluded_dir
286 $ rmdir excluded_dir
288
287
289 skip patterns
288 skip patterns
290
289
291 $ mkdir .svn
290 $ mkdir .svn
292 $ touch .svn/foo
291 $ touch .svn/foo
293 $ mkdir directory/.svn
292 $ mkdir directory/.svn
294 $ touch directory/.svn/foo
293 $ touch directory/.svn/foo
295 $ hg purge -p -X .svn -X '*/.svn'
294 $ hg purge -p -X .svn -X '*/.svn'
296 $ hg purge -p -X re:.*.svn
295 $ hg purge -p -X re:.*.svn
297
296
298 $ rm -R .svn directory r1
297 $ rm -R .svn directory r1
299
298
300 only remove files
299 only remove files
301
300
302 $ mkdir -p empty_dir dir
301 $ mkdir -p empty_dir dir
303 $ touch untracked_file dir/untracked_file
302 $ touch untracked_file dir/untracked_file
304 $ hg purge -p --files
303 $ hg purge -p --files
305 dir/untracked_file
304 dir/untracked_file
306 untracked_file
305 untracked_file
307 $ hg purge -v --files
306 $ hg purge -v --files
308 removing file dir/untracked_file
307 removing file dir/untracked_file
309 removing file untracked_file
308 removing file untracked_file
310 $ ls -A
309 $ ls -A
311 .hg
310 .hg
312 .hgignore
311 .hgignore
313 dir
312 dir
314 empty_dir
313 empty_dir
315 $ ls dir
314 $ ls dir
316
315
317 only remove dirs
316 only remove dirs
318
317
319 $ mkdir -p empty_dir dir
318 $ mkdir -p empty_dir dir
320 $ touch untracked_file dir/untracked_file
319 $ touch untracked_file dir/untracked_file
321 $ hg purge -p --dirs
320 $ hg purge -p --dirs
322 empty_dir
321 empty_dir
323 $ hg purge -v --dirs
322 $ hg purge -v --dirs
324 removing directory empty_dir
323 removing directory empty_dir
325 $ ls -A
324 $ ls -A
326 .hg
325 .hg
327 .hgignore
326 .hgignore
328 dir
327 dir
329 untracked_file
328 untracked_file
330 $ ls dir
329 $ ls dir
331 untracked_file
330 untracked_file
332
331
333 remove both files and dirs
332 remove both files and dirs
334
333
335 $ mkdir -p empty_dir dir
334 $ mkdir -p empty_dir dir
336 $ touch untracked_file dir/untracked_file
335 $ touch untracked_file dir/untracked_file
337 $ hg purge -p --files --dirs
336 $ hg purge -p --files --dirs
338 dir/untracked_file
337 dir/untracked_file
339 untracked_file
338 untracked_file
340 empty_dir
339 empty_dir
341 $ hg purge -v --files --dirs
340 $ hg purge -v --files --dirs
342 removing file dir/untracked_file
341 removing file dir/untracked_file
343 removing file untracked_file
342 removing file untracked_file
344 removing directory empty_dir
343 removing directory empty_dir
345 removing directory dir
344 removing directory dir
346 $ ls -A
345 $ ls -A
347 .hg
346 .hg
348 .hgignore
347 .hgignore
349
348
350 $ cd ..
349 $ cd ..
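
For reference, a hedged sketch of the selection logic the purge cases above
walk through: unknown files (plus ignored ones with -i/--all) and
directories that are empty, deepest-first; names are illustrative and the
sketch ignores the interaction where deleting files frees their directory:

    import os

    def purge_plan(root, unknown, ignored, with_ignored=False):
        # --files: unknown files, plus ignored ones with -i/--all
        files = sorted(unknown + (ignored if with_ignored else []))
        # --dirs: empty directories; bottom-up so parents whose only
        # children are themselves removable count as empty too
        removable = set()
        for d, subdirs, names in os.walk(root, topdown=False):
            if d == root or names:
                continue
            if all(os.path.join(d, s) in removable for s in subdirs):
                removable.add(d)
        return files, sorted(removable, reverse=True)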
@@ -1,968 +1,961 b''
1 #testcases dirstate-v1 dirstate-v2
1 #testcases dirstate-v1 dirstate-v2
2
2
3 #if no-rust
4 $ hg init repo0 --config format.exp-dirstate-v2=1
5 abort: dirstate v2 format requested by config but not supported (requires Rust extensions)
6 [255]
7 #endif
8
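
The block removed just above tested the pre-change behaviour: requesting
dirstate-v2 without the Rust extensions aborted at repo creation. A sketch
of that old gate, with illustrative names and the abort text taken from the
removed test:

    class Abort(Exception):
        pass

    def check_dirstate_v2(requested, rust_available):
        # old gate: refuse the format rather than write data that the
        # pure-Python code could not read back
        if requested and not rust_available:
            raise Abort("dirstate v2 format requested by config but not "
                        "supported (requires Rust extensions)")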
9 #if dirstate-v2
3 #if dirstate-v2
10 #require rust
11 $ echo '[format]' >> $HGRCPATH
4 $ echo '[format]' >> $HGRCPATH
12 $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
5 $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
13 #endif
6 #endif
14
7
15 $ hg init repo1
8 $ hg init repo1
16 $ cd repo1
9 $ cd repo1
17 $ mkdir a b a/1 b/1 b/2
10 $ mkdir a b a/1 b/1 b/2
18 $ touch in_root a/in_a b/in_b a/1/in_a_1 b/1/in_b_1 b/2/in_b_2
11 $ touch in_root a/in_a b/in_b a/1/in_a_1 b/1/in_b_1 b/2/in_b_2
19
12
20 hg status in repo root:
13 hg status in repo root:
21
14
22 $ hg status
15 $ hg status
23 ? a/1/in_a_1
16 ? a/1/in_a_1
24 ? a/in_a
17 ? a/in_a
25 ? b/1/in_b_1
18 ? b/1/in_b_1
26 ? b/2/in_b_2
19 ? b/2/in_b_2
27 ? b/in_b
20 ? b/in_b
28 ? in_root
21 ? in_root
29
22
30 hg status . in repo root:
23 hg status . in repo root:
31
24
32 $ hg status .
25 $ hg status .
33 ? a/1/in_a_1
26 ? a/1/in_a_1
34 ? a/in_a
27 ? a/in_a
35 ? b/1/in_b_1
28 ? b/1/in_b_1
36 ? b/2/in_b_2
29 ? b/2/in_b_2
37 ? b/in_b
30 ? b/in_b
38 ? in_root
31 ? in_root
39
32
40 $ hg status --cwd a
33 $ hg status --cwd a
41 ? a/1/in_a_1
34 ? a/1/in_a_1
42 ? a/in_a
35 ? a/in_a
43 ? b/1/in_b_1
36 ? b/1/in_b_1
44 ? b/2/in_b_2
37 ? b/2/in_b_2
45 ? b/in_b
38 ? b/in_b
46 ? in_root
39 ? in_root
47 $ hg status --cwd a .
40 $ hg status --cwd a .
48 ? 1/in_a_1
41 ? 1/in_a_1
49 ? in_a
42 ? in_a
50 $ hg status --cwd a ..
43 $ hg status --cwd a ..
51 ? 1/in_a_1
44 ? 1/in_a_1
52 ? in_a
45 ? in_a
53 ? ../b/1/in_b_1
46 ? ../b/1/in_b_1
54 ? ../b/2/in_b_2
47 ? ../b/2/in_b_2
55 ? ../b/in_b
48 ? ../b/in_b
56 ? ../in_root
49 ? ../in_root
57
50
58 $ hg status --cwd b
51 $ hg status --cwd b
59 ? a/1/in_a_1
52 ? a/1/in_a_1
60 ? a/in_a
53 ? a/in_a
61 ? b/1/in_b_1
54 ? b/1/in_b_1
62 ? b/2/in_b_2
55 ? b/2/in_b_2
63 ? b/in_b
56 ? b/in_b
64 ? in_root
57 ? in_root
65 $ hg status --cwd b .
58 $ hg status --cwd b .
66 ? 1/in_b_1
59 ? 1/in_b_1
67 ? 2/in_b_2
60 ? 2/in_b_2
68 ? in_b
61 ? in_b
69 $ hg status --cwd b ..
62 $ hg status --cwd b ..
70 ? ../a/1/in_a_1
63 ? ../a/1/in_a_1
71 ? ../a/in_a
64 ? ../a/in_a
72 ? 1/in_b_1
65 ? 1/in_b_1
73 ? 2/in_b_2
66 ? 2/in_b_2
74 ? in_b
67 ? in_b
75 ? ../in_root
68 ? ../in_root
76
69
77 $ hg status --cwd a/1
70 $ hg status --cwd a/1
78 ? a/1/in_a_1
71 ? a/1/in_a_1
79 ? a/in_a
72 ? a/in_a
80 ? b/1/in_b_1
73 ? b/1/in_b_1
81 ? b/2/in_b_2
74 ? b/2/in_b_2
82 ? b/in_b
75 ? b/in_b
83 ? in_root
76 ? in_root
84 $ hg status --cwd a/1 .
77 $ hg status --cwd a/1 .
85 ? in_a_1
78 ? in_a_1
86 $ hg status --cwd a/1 ..
79 $ hg status --cwd a/1 ..
87 ? in_a_1
80 ? in_a_1
88 ? ../in_a
81 ? ../in_a
89
82
90 $ hg status --cwd b/1
83 $ hg status --cwd b/1
91 ? a/1/in_a_1
84 ? a/1/in_a_1
92 ? a/in_a
85 ? a/in_a
93 ? b/1/in_b_1
86 ? b/1/in_b_1
94 ? b/2/in_b_2
87 ? b/2/in_b_2
95 ? b/in_b
88 ? b/in_b
96 ? in_root
89 ? in_root
97 $ hg status --cwd b/1 .
90 $ hg status --cwd b/1 .
98 ? in_b_1
91 ? in_b_1
99 $ hg status --cwd b/1 ..
92 $ hg status --cwd b/1 ..
100 ? in_b_1
93 ? in_b_1
101 ? ../2/in_b_2
94 ? ../2/in_b_2
102 ? ../in_b
95 ? ../in_b
103
96
104 $ hg status --cwd b/2
97 $ hg status --cwd b/2
105 ? a/1/in_a_1
98 ? a/1/in_a_1
106 ? a/in_a
99 ? a/in_a
107 ? b/1/in_b_1
100 ? b/1/in_b_1
108 ? b/2/in_b_2
101 ? b/2/in_b_2
109 ? b/in_b
102 ? b/in_b
110 ? in_root
103 ? in_root
111 $ hg status --cwd b/2 .
104 $ hg status --cwd b/2 .
112 ? in_b_2
105 ? in_b_2
113 $ hg status --cwd b/2 ..
106 $ hg status --cwd b/2 ..
114 ? ../1/in_b_1
107 ? ../1/in_b_1
115 ? in_b_2
108 ? in_b_2
116 ? ../in_b
109 ? ../in_b
117
110
118 combining patterns with root and patterns without a root works
111 combining patterns with root and patterns without a root works
119
112
120 $ hg st a/in_a re:.*b$
113 $ hg st a/in_a re:.*b$
121 ? a/in_a
114 ? a/in_a
122 ? b/in_b
115 ? b/in_b
123
116
124 tweaking defaults works
117 tweaking defaults works
125 $ hg status --cwd a --config ui.tweakdefaults=yes
118 $ hg status --cwd a --config ui.tweakdefaults=yes
126 ? 1/in_a_1
119 ? 1/in_a_1
127 ? in_a
120 ? in_a
128 ? ../b/1/in_b_1
121 ? ../b/1/in_b_1
129 ? ../b/2/in_b_2
122 ? ../b/2/in_b_2
130 ? ../b/in_b
123 ? ../b/in_b
131 ? ../in_root
124 ? ../in_root
132 $ HGPLAIN=1 hg status --cwd a --config ui.tweakdefaults=yes
125 $ HGPLAIN=1 hg status --cwd a --config ui.tweakdefaults=yes
133 ? a/1/in_a_1 (glob)
126 ? a/1/in_a_1 (glob)
134 ? a/in_a (glob)
127 ? a/in_a (glob)
135 ? b/1/in_b_1 (glob)
128 ? b/1/in_b_1 (glob)
136 ? b/2/in_b_2 (glob)
129 ? b/2/in_b_2 (glob)
137 ? b/in_b (glob)
130 ? b/in_b (glob)
138 ? in_root
131 ? in_root
139 $ HGPLAINEXCEPT=tweakdefaults hg status --cwd a --config ui.tweakdefaults=yes
132 $ HGPLAINEXCEPT=tweakdefaults hg status --cwd a --config ui.tweakdefaults=yes
140 ? 1/in_a_1
133 ? 1/in_a_1
141 ? in_a
134 ? in_a
142 ? ../b/1/in_b_1
135 ? ../b/1/in_b_1
143 ? ../b/2/in_b_2
136 ? ../b/2/in_b_2
144 ? ../b/in_b
137 ? ../b/in_b
145 ? ../in_root (glob)
138 ? ../in_root (glob)
146
139
147 relative paths can be requested
140 relative paths can be requested
148
141
149 $ hg status --cwd a --config ui.relative-paths=yes
142 $ hg status --cwd a --config ui.relative-paths=yes
150 ? 1/in_a_1
143 ? 1/in_a_1
151 ? in_a
144 ? in_a
152 ? ../b/1/in_b_1
145 ? ../b/1/in_b_1
153 ? ../b/2/in_b_2
146 ? ../b/2/in_b_2
154 ? ../b/in_b
147 ? ../b/in_b
155 ? ../in_root
148 ? ../in_root
156
149
157 $ hg status --cwd a . --config ui.relative-paths=legacy
150 $ hg status --cwd a . --config ui.relative-paths=legacy
158 ? 1/in_a_1
151 ? 1/in_a_1
159 ? in_a
152 ? in_a
160 $ hg status --cwd a . --config ui.relative-paths=no
153 $ hg status --cwd a . --config ui.relative-paths=no
161 ? a/1/in_a_1
154 ? a/1/in_a_1
162 ? a/in_a
155 ? a/in_a
163
156
164 commands.status.relative overrides ui.relative-paths
157 commands.status.relative overrides ui.relative-paths
165
158
166 $ cat >> $HGRCPATH <<EOF
159 $ cat >> $HGRCPATH <<EOF
167 > [ui]
160 > [ui]
168 > relative-paths = False
161 > relative-paths = False
169 > [commands]
162 > [commands]
170 > status.relative = True
163 > status.relative = True
171 > EOF
164 > EOF
172 $ hg status --cwd a
165 $ hg status --cwd a
173 ? 1/in_a_1
166 ? 1/in_a_1
174 ? in_a
167 ? in_a
175 ? ../b/1/in_b_1
168 ? ../b/1/in_b_1
176 ? ../b/2/in_b_2
169 ? ../b/2/in_b_2
177 ? ../b/in_b
170 ? ../b/in_b
178 ? ../in_root
171 ? ../in_root
179 $ HGPLAIN=1 hg status --cwd a
172 $ HGPLAIN=1 hg status --cwd a
180 ? a/1/in_a_1 (glob)
173 ? a/1/in_a_1 (glob)
181 ? a/in_a (glob)
174 ? a/in_a (glob)
182 ? b/1/in_b_1 (glob)
175 ? b/1/in_b_1 (glob)
183 ? b/2/in_b_2 (glob)
176 ? b/2/in_b_2 (glob)
184 ? b/in_b (glob)
177 ? b/in_b (glob)
185 ? in_root
178 ? in_root
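
A hedged sketch of the precedence these runs demonstrate: plain mode forces
repo-root-relative output, otherwise commands.status.relative wins over
ui.relative-paths; the dict-based config accessor is illustrative:

    def want_cwd_relative(config, plain):
        # HGPLAIN output stays relative to the repository root
        if plain:
            return False
        status_relative = config.get(('commands', 'status.relative'))
        if status_relative is not None:
            return status_relative
        return bool(config.get(('ui', 'relative-paths')))

    cfg = {('ui', 'relative-paths'): False,
           ('commands', 'status.relative'): True}
    print(want_cwd_relative(cfg, plain=False))  # True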
186
179
187 if relative paths are explicitly off, tweakdefaults doesn't change it
180 if relative paths are explicitly off, tweakdefaults doesn't change it
188 $ cat >> $HGRCPATH <<EOF
181 $ cat >> $HGRCPATH <<EOF
189 > [commands]
182 > [commands]
190 > status.relative = False
183 > status.relative = False
191 > EOF
184 > EOF
192 $ hg status --cwd a --config ui.tweakdefaults=yes
185 $ hg status --cwd a --config ui.tweakdefaults=yes
193 ? a/1/in_a_1
186 ? a/1/in_a_1
194 ? a/in_a
187 ? a/in_a
195 ? b/1/in_b_1
188 ? b/1/in_b_1
196 ? b/2/in_b_2
189 ? b/2/in_b_2
197 ? b/in_b
190 ? b/in_b
198 ? in_root
191 ? in_root
199
192
200 $ cd ..
193 $ cd ..
201
194
202 $ hg init repo2
195 $ hg init repo2
203 $ cd repo2
196 $ cd repo2
204 $ touch modified removed deleted ignored
197 $ touch modified removed deleted ignored
205 $ echo "^ignored$" > .hgignore
198 $ echo "^ignored$" > .hgignore
206 $ hg ci -A -m 'initial checkin'
199 $ hg ci -A -m 'initial checkin'
207 adding .hgignore
200 adding .hgignore
208 adding deleted
201 adding deleted
209 adding modified
202 adding modified
210 adding removed
203 adding removed
211 $ touch modified added unknown ignored
204 $ touch modified added unknown ignored
212 $ hg add added
205 $ hg add added
213 $ hg remove removed
206 $ hg remove removed
214 $ rm deleted
207 $ rm deleted
215
208
216 hg status:
209 hg status:
217
210
218 $ hg status
211 $ hg status
219 A added
212 A added
220 R removed
213 R removed
221 ! deleted
214 ! deleted
222 ? unknown
215 ? unknown
223
216
224 hg status modified added removed deleted unknown never-existed ignored:
217 hg status modified added removed deleted unknown never-existed ignored:
225
218
226 $ hg status modified added removed deleted unknown never-existed ignored
219 $ hg status modified added removed deleted unknown never-existed ignored
227 never-existed: * (glob)
220 never-existed: * (glob)
228 A added
221 A added
229 R removed
222 R removed
230 ! deleted
223 ! deleted
231 ? unknown
224 ? unknown
232
225
233 $ hg copy modified copied
226 $ hg copy modified copied
234
227
235 hg status -C:
228 hg status -C:
236
229
237 $ hg status -C
230 $ hg status -C
238 A added
231 A added
239 A copied
232 A copied
240 modified
233 modified
241 R removed
234 R removed
242 ! deleted
235 ! deleted
243 ? unknown
236 ? unknown
244
237
245 hg status -A:
238 hg status -A:
246
239
247 $ hg status -A
240 $ hg status -A
248 A added
241 A added
249 A copied
242 A copied
250 modified
243 modified
251 R removed
244 R removed
252 ! deleted
245 ! deleted
253 ? unknown
246 ? unknown
254 I ignored
247 I ignored
255 C .hgignore
248 C .hgignore
256 C modified
249 C modified
257
250
258 $ hg status -A -T '{status} {path} {node|shortest}\n'
251 $ hg status -A -T '{status} {path} {node|shortest}\n'
259 A added ffff
252 A added ffff
260 A copied ffff
253 A copied ffff
261 R removed ffff
254 R removed ffff
262 ! deleted ffff
255 ! deleted ffff
263 ? unknown ffff
256 ? unknown ffff
264 I ignored ffff
257 I ignored ffff
265 C .hgignore ffff
258 C .hgignore ffff
266 C modified ffff
259 C modified ffff
267
260
268 $ hg status -A -Tjson
261 $ hg status -A -Tjson
269 [
262 [
270 {
263 {
271 "itemtype": "file",
264 "itemtype": "file",
272 "path": "added",
265 "path": "added",
273 "status": "A"
266 "status": "A"
274 },
267 },
275 {
268 {
276 "itemtype": "file",
269 "itemtype": "file",
277 "path": "copied",
270 "path": "copied",
278 "source": "modified",
271 "source": "modified",
279 "status": "A"
272 "status": "A"
280 },
273 },
281 {
274 {
282 "itemtype": "file",
275 "itemtype": "file",
283 "path": "removed",
276 "path": "removed",
284 "status": "R"
277 "status": "R"
285 },
278 },
286 {
279 {
287 "itemtype": "file",
280 "itemtype": "file",
288 "path": "deleted",
281 "path": "deleted",
289 "status": "!"
282 "status": "!"
290 },
283 },
291 {
284 {
292 "itemtype": "file",
285 "itemtype": "file",
293 "path": "unknown",
286 "path": "unknown",
294 "status": "?"
287 "status": "?"
295 },
288 },
296 {
289 {
297 "itemtype": "file",
290 "itemtype": "file",
298 "path": "ignored",
291 "path": "ignored",
299 "status": "I"
292 "status": "I"
300 },
293 },
301 {
294 {
302 "itemtype": "file",
295 "itemtype": "file",
303 "path": ".hgignore",
296 "path": ".hgignore",
304 "status": "C"
297 "status": "C"
305 },
298 },
306 {
299 {
307 "itemtype": "file",
300 "itemtype": "file",
308 "path": "modified",
301 "path": "modified",
309 "status": "C"
302 "status": "C"
310 }
303 }
311 ]
304 ]
312
305
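The JSON form above is the stable machine-readable variant of this output. As a minimal sketch (not part of the test suite; it assumes `hg` is on PATH and the current directory is a repository), a script could consume it like this:

    import json
    import subprocess

    # `hg status -A -Tjson` emits a list of {"itemtype", "path", "status"}
    # objects; "source" is present only for copied or renamed entries.
    out = subprocess.check_output(['hg', 'status', '-A', '-Tjson'])
    for item in json.loads(out):
        print(item['status'], item['path'], item.get('source', ''))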
313 $ hg status -A -Tpickle > pickle
306 $ hg status -A -Tpickle > pickle
314 >>> from __future__ import print_function
307 >>> from __future__ import print_function
315 >>> from mercurial import util
308 >>> from mercurial import util
316 >>> pickle = util.pickle
309 >>> pickle = util.pickle
317 >>> data = sorted((x[b'status'].decode(), x[b'path'].decode()) for x in pickle.load(open("pickle", r"rb")))
310 >>> data = sorted((x[b'status'].decode(), x[b'path'].decode()) for x in pickle.load(open("pickle", r"rb")))
318 >>> for s, p in data: print("%s %s" % (s, p))
311 >>> for s, p in data: print("%s %s" % (s, p))
319 ! deleted
312 ! deleted
320 ? pickle
313 ? pickle
321 ? unknown
314 ? unknown
322 A added
315 A added
323 A copied
316 A copied
324 C .hgignore
317 C .hgignore
325 C modified
318 C modified
326 I ignored
319 I ignored
327 R removed
320 R removed
328 $ rm pickle
321 $ rm pickle
329
322
330 $ echo "^ignoreddir$" > .hgignore
323 $ echo "^ignoreddir$" > .hgignore
331 $ mkdir ignoreddir
324 $ mkdir ignoreddir
332 $ touch ignoreddir/file
325 $ touch ignoreddir/file
333
326
334 Test templater support:
327 Test templater support:
335
328
336 $ hg status -AT "[{status}]\t{if(source, '{source} -> ')}{path}\n"
329 $ hg status -AT "[{status}]\t{if(source, '{source} -> ')}{path}\n"
337 [M] .hgignore
330 [M] .hgignore
338 [A] added
331 [A] added
339 [A] modified -> copied
332 [A] modified -> copied
340 [R] removed
333 [R] removed
341 [!] deleted
334 [!] deleted
342 [?] ignored
335 [?] ignored
343 [?] unknown
336 [?] unknown
344 [I] ignoreddir/file
337 [I] ignoreddir/file
345 [C] modified
338 [C] modified
346 $ hg status -AT default
339 $ hg status -AT default
347 M .hgignore
340 M .hgignore
348 A added
341 A added
349 A copied
342 A copied
350 modified
343 modified
351 R removed
344 R removed
352 ! deleted
345 ! deleted
353 ? ignored
346 ? ignored
354 ? unknown
347 ? unknown
355 I ignoreddir/file
348 I ignoreddir/file
356 C modified
349 C modified
357 $ hg status -T compact
350 $ hg status -T compact
358 abort: "status" not in template map
351 abort: "status" not in template map
359 [255]
352 [255]
360
353
361 hg status ignoreddir/file:
354 hg status ignoreddir/file:
362
355
363 $ hg status ignoreddir/file
356 $ hg status ignoreddir/file
364
357
365 hg status -i ignoreddir/file:
358 hg status -i ignoreddir/file:
366
359
367 $ hg status -i ignoreddir/file
360 $ hg status -i ignoreddir/file
368 I ignoreddir/file
361 I ignoreddir/file
369 $ cd ..
362 $ cd ..
370
363
371 Check 'status -q' and some combinations
364 Check 'status -q' and some combinations
372
365
373 $ hg init repo3
366 $ hg init repo3
374 $ cd repo3
367 $ cd repo3
375 $ touch modified removed deleted ignored
368 $ touch modified removed deleted ignored
376 $ echo "^ignored$" > .hgignore
369 $ echo "^ignored$" > .hgignore
377 $ hg commit -A -m 'initial checkin'
370 $ hg commit -A -m 'initial checkin'
378 adding .hgignore
371 adding .hgignore
379 adding deleted
372 adding deleted
380 adding modified
373 adding modified
381 adding removed
374 adding removed
382 $ touch added unknown ignored
375 $ touch added unknown ignored
383 $ hg add added
376 $ hg add added
384 $ echo "test" >> modified
377 $ echo "test" >> modified
385 $ hg remove removed
378 $ hg remove removed
386 $ rm deleted
379 $ rm deleted
387 $ hg copy modified copied
380 $ hg copy modified copied
388
381
389 Specify the working directory revision explicitly; that should be the same as
382 Specify the working directory revision explicitly; that should be the same as
390 "hg status"
383 "hg status"
391
384
392 $ hg status --change "wdir()"
385 $ hg status --change "wdir()"
393 M modified
386 M modified
394 A added
387 A added
395 A copied
388 A copied
396 R removed
389 R removed
397 ! deleted
390 ! deleted
398 ? unknown
391 ? unknown
399
392
400 Run status with 2 different flags.
393 Run status with 2 different flags.
401 Check whether the results are the same or different.
394 Check whether the results are the same or different.
402 If the result is not as expected, print an error.
395 If the result is not as expected, print an error.
403
396
404 $ assert() {
397 $ assert() {
405 > hg status $1 > ../a
398 > hg status $1 > ../a
406 > hg status $2 > ../b
399 > hg status $2 > ../b
407 > if diff ../a ../b > /dev/null; then
400 > if diff ../a ../b > /dev/null; then
408 > out=0
401 > out=0
409 > else
402 > else
410 > out=1
403 > out=1
411 > fi
404 > fi
412 > if [ $3 -eq 0 ]; then
405 > if [ $3 -eq 0 ]; then
413 > df="same"
406 > df="same"
414 > else
407 > else
415 > df="different"
408 > df="different"
416 > fi
409 > fi
417 > if [ $out -ne $3 ]; then
410 > if [ $out -ne $3 ]; then
418 > echo "Error on $1 and $2, should be $df."
411 > echo "Error on $1 and $2, should be $df."
419 > fi
412 > fi
420 > }
413 > }
421
414
422 Assert flag1 flag2 [0-same | 1-different]
415 Assert flag1 flag2 [0-same | 1-different]
423
416
424 $ assert "-q" "-mard" 0
417 $ assert "-q" "-mard" 0
425 $ assert "-A" "-marduicC" 0
418 $ assert "-A" "-marduicC" 0
426 $ assert "-qA" "-mardcC" 0
419 $ assert "-qA" "-mardcC" 0
427 $ assert "-qAui" "-A" 0
420 $ assert "-qAui" "-A" 0
428 $ assert "-qAu" "-marducC" 0
421 $ assert "-qAu" "-marducC" 0
429 $ assert "-qAi" "-mardicC" 0
422 $ assert "-qAi" "-mardicC" 0
430 $ assert "-qu" "-u" 0
423 $ assert "-qu" "-u" 0
431 $ assert "-q" "-u" 1
424 $ assert "-q" "-u" 1
432 $ assert "-m" "-a" 1
425 $ assert "-m" "-a" 1
433 $ assert "-r" "-d" 1
426 $ assert "-r" "-d" 1
434 $ cd ..
427 $ cd ..
435
428
436 $ hg init repo4
429 $ hg init repo4
437 $ cd repo4
430 $ cd repo4
438 $ touch modified removed deleted
431 $ touch modified removed deleted
439 $ hg ci -q -A -m 'initial checkin'
432 $ hg ci -q -A -m 'initial checkin'
440 $ touch added unknown
433 $ touch added unknown
441 $ hg add added
434 $ hg add added
442 $ hg remove removed
435 $ hg remove removed
443 $ rm deleted
436 $ rm deleted
444 $ echo x > modified
437 $ echo x > modified
445 $ hg copy modified copied
438 $ hg copy modified copied
446 $ hg ci -m 'test checkin' -d "1000001 0"
439 $ hg ci -m 'test checkin' -d "1000001 0"
447 $ rm *
440 $ rm *
448 $ touch unrelated
441 $ touch unrelated
449 $ hg ci -q -A -m 'unrelated checkin' -d "1000002 0"
442 $ hg ci -q -A -m 'unrelated checkin' -d "1000002 0"
450
443
451 hg status --change 1:
444 hg status --change 1:
452
445
453 $ hg status --change 1
446 $ hg status --change 1
454 M modified
447 M modified
455 A added
448 A added
456 A copied
449 A copied
457 R removed
450 R removed
458
451
459 hg status --change 1 unrelated:
452 hg status --change 1 unrelated:
460
453
461 $ hg status --change 1 unrelated
454 $ hg status --change 1 unrelated
462
455
463 hg status -C --change 1 added modified copied removed deleted:
456 hg status -C --change 1 added modified copied removed deleted:
464
457
465 $ hg status -C --change 1 added modified copied removed deleted
458 $ hg status -C --change 1 added modified copied removed deleted
466 M modified
459 M modified
467 A added
460 A added
468 A copied
461 A copied
469 modified
462 modified
470 R removed
463 R removed
471
464
472 hg status -A --change 1 and revset:
465 hg status -A --change 1 and revset:
473
466
474 $ hg status -A --change '1|1'
467 $ hg status -A --change '1|1'
475 M modified
468 M modified
476 A added
469 A added
477 A copied
470 A copied
478 modified
471 modified
479 R removed
472 R removed
480 C deleted
473 C deleted
481
474
482 $ cd ..
475 $ cd ..
483
476
484 hg status with --rev and reverted changes:
477 hg status with --rev and reverted changes:
485
478
486 $ hg init reverted-changes-repo
479 $ hg init reverted-changes-repo
487 $ cd reverted-changes-repo
480 $ cd reverted-changes-repo
488 $ echo a > file
481 $ echo a > file
489 $ hg add file
482 $ hg add file
490 $ hg ci -m a
483 $ hg ci -m a
491 $ echo b > file
484 $ echo b > file
492 $ hg ci -m b
485 $ hg ci -m b
493
486
494 reverted file should appear clean
487 reverted file should appear clean
495
488
496 $ hg revert -r 0 .
489 $ hg revert -r 0 .
497 reverting file
490 reverting file
498 $ hg status -A --rev 0
491 $ hg status -A --rev 0
499 C file
492 C file
500
493
501 #if execbit
494 #if execbit
502 reverted file with changed flag should appear modified
495 reverted file with changed flag should appear modified
503
496
504 $ chmod +x file
497 $ chmod +x file
505 $ hg status -A --rev 0
498 $ hg status -A --rev 0
506 M file
499 M file
507
500
508 $ hg revert -r 0 .
501 $ hg revert -r 0 .
509 reverting file
502 reverting file
510
503
511 reverted and committed file with changed flag should appear modified
504 reverted and committed file with changed flag should appear modified
512
505
513 $ hg co -C .
506 $ hg co -C .
514 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
507 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
515 $ chmod +x file
508 $ chmod +x file
516 $ hg ci -m 'change flag'
509 $ hg ci -m 'change flag'
517 $ hg status -A --rev 1 --rev 2
510 $ hg status -A --rev 1 --rev 2
518 M file
511 M file
519 $ hg diff -r 1 -r 2
512 $ hg diff -r 1 -r 2
520
513
521 #endif
514 #endif
522
515
523 $ cd ..
516 $ cd ..
524
517
525 hg status of a binary file starting with '\1\n', a separator for metadata:
518 hg status of a binary file starting with '\1\n', a separator for metadata:
526
519
527 $ hg init repo5
520 $ hg init repo5
528 $ cd repo5
521 $ cd repo5
529 >>> open("010a", r"wb").write(b"\1\nfoo") and None
522 >>> open("010a", r"wb").write(b"\1\nfoo") and None
530 $ hg ci -q -A -m 'initial checkin'
523 $ hg ci -q -A -m 'initial checkin'
531 $ hg status -A
524 $ hg status -A
532 C 010a
525 C 010a
533
526
534 >>> open("010a", r"wb").write(b"\1\nbar") and None
527 >>> open("010a", r"wb").write(b"\1\nbar") and None
535 $ hg status -A
528 $ hg status -A
536 M 010a
529 M 010a
537 $ hg ci -q -m 'modify 010a'
530 $ hg ci -q -m 'modify 010a'
538 $ hg status -A --rev 0:1
531 $ hg status -A --rev 0:1
539 M 010a
532 M 010a
540
533
541 $ touch empty
534 $ touch empty
542 $ hg ci -q -A -m 'add another file'
535 $ hg ci -q -A -m 'add another file'
543 $ hg status -A --rev 1:2 010a
536 $ hg status -A --rev 1:2 010a
544 C 010a
537 C 010a
545
538
546 $ cd ..
539 $ cd ..
547
540
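Background for the scenario below, as a hedged sketch (simplified; `pack_filelog_text` is a hypothetical name, not Mercurial's actual function): filelog revisions frame copy metadata between two '\1\n' markers, so file contents that happen to begin with those bytes must themselves be wrapped in an (empty) metadata block to keep the encoding unambiguous.

    def pack_filelog_text(data, meta=None):
        # Revisions with metadata are stored as:
        #   \1\n<key>: <value>\n...\1\n<data>
        # Data starting with \1\n would be misread as a metadata header,
        # so it is wrapped in an empty metadata block as well.
        if meta or data.startswith(b'\x01\n'):
            header = b''.join(
                b'%s: %s\n' % (k, v) for k, v in sorted((meta or {}).items())
            )
            return b'\x01\n' + header + b'\x01\n' + data
        return data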
548 test "hg status" with "directory pattern" which matches against files
541 test "hg status" with "directory pattern" which matches against files
549 only known on target revision.
542 only known on target revision.
550
543
551 $ hg init repo6
544 $ hg init repo6
552 $ cd repo6
545 $ cd repo6
553
546
554 $ echo a > a.txt
547 $ echo a > a.txt
555 $ hg add a.txt
548 $ hg add a.txt
556 $ hg commit -m '#0'
549 $ hg commit -m '#0'
557 $ mkdir -p 1/2/3/4/5
550 $ mkdir -p 1/2/3/4/5
558 $ echo b > 1/2/3/4/5/b.txt
551 $ echo b > 1/2/3/4/5/b.txt
559 $ hg add 1/2/3/4/5/b.txt
552 $ hg add 1/2/3/4/5/b.txt
560 $ hg commit -m '#1'
553 $ hg commit -m '#1'
561
554
562 $ hg update -C 0 > /dev/null
555 $ hg update -C 0 > /dev/null
563 $ hg status -A
556 $ hg status -A
564 C a.txt
557 C a.txt
565
558
566 the directory matching the specified pattern should be removed,
559 the directory matching the specified pattern should be removed,
567 because the directory's existence prevents 'dirstate.walk()' from showing
560 because the directory's existence prevents 'dirstate.walk()' from showing
568 a warning message about such a pattern.
561 a warning message about such a pattern.
569
562
570 $ test ! -d 1
563 $ test ! -d 1
571 $ hg status -A --rev 1 1/2/3/4/5/b.txt
564 $ hg status -A --rev 1 1/2/3/4/5/b.txt
572 R 1/2/3/4/5/b.txt
565 R 1/2/3/4/5/b.txt
573 $ hg status -A --rev 1 1/2/3/4/5
566 $ hg status -A --rev 1 1/2/3/4/5
574 R 1/2/3/4/5/b.txt
567 R 1/2/3/4/5/b.txt
575 $ hg status -A --rev 1 1/2/3
568 $ hg status -A --rev 1 1/2/3
576 R 1/2/3/4/5/b.txt
569 R 1/2/3/4/5/b.txt
577 $ hg status -A --rev 1 1
570 $ hg status -A --rev 1 1
578 R 1/2/3/4/5/b.txt
571 R 1/2/3/4/5/b.txt
579
572
580 $ hg status --config ui.formatdebug=True --rev 1 1
573 $ hg status --config ui.formatdebug=True --rev 1 1
581 status = [
574 status = [
582 {
575 {
583 'itemtype': 'file',
576 'itemtype': 'file',
584 'path': '1/2/3/4/5/b.txt',
577 'path': '1/2/3/4/5/b.txt',
585 'status': 'R'
578 'status': 'R'
586 },
579 },
587 ]
580 ]
588
581
589 #if windows
582 #if windows
590 $ hg --config ui.slash=false status -A --rev 1 1
583 $ hg --config ui.slash=false status -A --rev 1 1
591 R 1\2\3\4\5\b.txt
584 R 1\2\3\4\5\b.txt
592 #endif
585 #endif
593
586
594 $ cd ..
587 $ cd ..
595
588
596 Status after move overwriting a file (issue4458)
589 Status after move overwriting a file (issue4458)
597 =================================================
590 =================================================
598
591
599
592
600 $ hg init issue4458
593 $ hg init issue4458
601 $ cd issue4458
594 $ cd issue4458
602 $ echo a > a
595 $ echo a > a
603 $ echo b > b
596 $ echo b > b
604 $ hg commit -Am base
597 $ hg commit -Am base
605 adding a
598 adding a
606 adding b
599 adding b
607
600
608
601
609 with --force
602 with --force
610
603
611 $ hg mv b --force a
604 $ hg mv b --force a
612 $ hg st --copies
605 $ hg st --copies
613 M a
606 M a
614 b
607 b
615 R b
608 R b
616 $ hg revert --all
609 $ hg revert --all
617 reverting a
610 reverting a
618 undeleting b
611 undeleting b
619 $ rm *.orig
612 $ rm *.orig
620
613
621 without force
614 without force
622
615
623 $ hg rm a
616 $ hg rm a
624 $ hg st --copies
617 $ hg st --copies
625 R a
618 R a
626 $ hg mv b a
619 $ hg mv b a
627 $ hg st --copies
620 $ hg st --copies
628 M a
621 M a
629 b
622 b
630 R b
623 R b
631
624
632 using ui.statuscopies setting
625 using ui.statuscopies setting
633 $ hg st --config ui.statuscopies=true
626 $ hg st --config ui.statuscopies=true
634 M a
627 M a
635 b
628 b
636 R b
629 R b
637 $ hg st --config ui.statuscopies=false
630 $ hg st --config ui.statuscopies=false
638 M a
631 M a
639 R b
632 R b
640 $ hg st --config ui.tweakdefaults=yes
633 $ hg st --config ui.tweakdefaults=yes
641 M a
634 M a
642 b
635 b
643 R b
636 R b
644
637
645 using log status template (issue5155)
638 using log status template (issue5155)
646 $ hg log -Tstatus -r 'wdir()' -C
639 $ hg log -Tstatus -r 'wdir()' -C
647 changeset: 2147483647:ffffffffffff
640 changeset: 2147483647:ffffffffffff
648 parent: 0:8c55c58b4c0e
641 parent: 0:8c55c58b4c0e
649 user: test
642 user: test
650 date: * (glob)
643 date: * (glob)
651 files:
644 files:
652 M a
645 M a
653 b
646 b
654 R b
647 R b
655
648
656 $ hg log -GTstatus -r 'wdir()' -C
649 $ hg log -GTstatus -r 'wdir()' -C
657 o changeset: 2147483647:ffffffffffff
650 o changeset: 2147483647:ffffffffffff
658 | parent: 0:8c55c58b4c0e
651 | parent: 0:8c55c58b4c0e
659 ~ user: test
652 ~ user: test
660 date: * (glob)
653 date: * (glob)
661 files:
654 files:
662 M a
655 M a
663 b
656 b
664 R b
657 R b
665
658
666
659
667 Other "bug" highlight, the revision status does not report the copy information.
660 Other "bug" highlight, the revision status does not report the copy information.
668 This is buggy behavior.
661 This is buggy behavior.
669
662
670 $ hg commit -m 'blah'
663 $ hg commit -m 'blah'
671 $ hg st --copies --change .
664 $ hg st --copies --change .
672 M a
665 M a
673 R b
666 R b
674
667
675 using log status template, the copy information is displayed correctly.
668 using log status template, the copy information is displayed correctly.
676 $ hg log -Tstatus -r. -C
669 $ hg log -Tstatus -r. -C
677 changeset: 1:6685fde43d21
670 changeset: 1:6685fde43d21
678 tag: tip
671 tag: tip
679 user: test
672 user: test
680 date: * (glob)
673 date: * (glob)
681 summary: blah
674 summary: blah
682 files:
675 files:
683 M a
676 M a
684 b
677 b
685 R b
678 R b
686
679
687
680
688 $ cd ..
681 $ cd ..
689
682
690 Make sure .hg doesn't show up even as a symlink
683 Make sure .hg doesn't show up even as a symlink
691
684
692 $ hg init repo0
685 $ hg init repo0
693 $ mkdir symlink-repo0
686 $ mkdir symlink-repo0
694 $ cd symlink-repo0
687 $ cd symlink-repo0
695 $ ln -s ../repo0/.hg
688 $ ln -s ../repo0/.hg
696 $ hg status
689 $ hg status
697
690
698 If the size hasn't changed but mtime has, status needs to read the contents
691 If the size hasn't changed but mtime has, status needs to read the contents
699 of the file to check whether it has changed
692 of the file to check whether it has changed
700
693
701 $ echo 1 > a
694 $ echo 1 > a
702 $ echo 1 > b
695 $ echo 1 > b
703 $ touch -t 200102030000 a b
696 $ touch -t 200102030000 a b
704 $ hg commit -Aqm '#0'
697 $ hg commit -Aqm '#0'
705 $ echo 2 > a
698 $ echo 2 > a
706 $ touch -t 200102040000 a b
699 $ touch -t 200102040000 a b
707 $ hg status
700 $ hg status
708 M a
701 M a
709
702
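The scenario above exercises the dirstate fast path. A minimal sketch of the decision, assuming a hypothetical entry carrying the recorded size and mtime (the real implementation also handles mtime ambiguity against the dirstate write time):

    import os

    def classify(path, recorded_size, recorded_mtime):
        st = os.stat(path)
        if st.st_size != recorded_size:
            return 'modified'         # a size change alone is conclusive
        if int(st.st_mtime) == recorded_mtime:
            return 'clean'            # fast path: the file is not read
        # Same size but different mtime, as produced by `touch -t` above:
        # only comparing contents can tell (here b is clean, a is not).
        return 'compare-contents'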
710 Asking specifically for the status of a deleted/removed file
703 Asking specifically for the status of a deleted/removed file
711
704
712 $ rm a
705 $ rm a
713 $ rm b
706 $ rm b
714 $ hg status a
707 $ hg status a
715 ! a
708 ! a
716 $ hg rm a
709 $ hg rm a
717 $ hg rm b
710 $ hg rm b
718 $ hg status a
711 $ hg status a
719 R a
712 R a
720 $ hg commit -qm '#1'
713 $ hg commit -qm '#1'
721 $ hg status a
714 $ hg status a
722 a: $ENOENT$
715 a: $ENOENT$
723
716
724 Check using include flag with pattern when status does not need to traverse
717 Check using include flag with pattern when status does not need to traverse
725 the working directory (issue6483)
718 the working directory (issue6483)
726
719
727 $ cd ..
720 $ cd ..
728 $ hg init issue6483
721 $ hg init issue6483
729 $ cd issue6483
722 $ cd issue6483
730 $ touch a.py b.rs
723 $ touch a.py b.rs
731 $ hg add a.py b.rs
724 $ hg add a.py b.rs
732 $ hg st -aI "*.py"
725 $ hg st -aI "*.py"
733 A a.py
726 A a.py
734
727
735 Also check exclude pattern
728 Also check exclude pattern
736
729
737 $ hg st -aX "*.rs"
730 $ hg st -aX "*.rs"
738 A a.py
731 A a.py
739
732
740 issue6335
733 issue6335
741 When a directory containing a tracked file gets symlinked, as of 5.8
734 When a directory containing a tracked file gets symlinked, as of 5.8
742 `hg st` only gives the correct answer about clean (or deleted) files
735 `hg st` only gives the correct answer about clean (or deleted) files
743 if also listing unknowns.
736 if also listing unknowns.
744 The tree-based dirstate and the status algorithm fix this:
737 The tree-based dirstate and the status algorithm fix this:
745
738
746 #if symlink no-dirstate-v1
739 #if symlink no-dirstate-v1 rust
747
740
748 $ cd ..
741 $ cd ..
749 $ hg init issue6335
742 $ hg init issue6335
750 $ cd issue6335
743 $ cd issue6335
751 $ mkdir foo
744 $ mkdir foo
752 $ touch foo/a
745 $ touch foo/a
753 $ hg ci -Ama
746 $ hg ci -Ama
754 adding foo/a
747 adding foo/a
755 $ mv foo bar
748 $ mv foo bar
756 $ ln -s bar foo
749 $ ln -s bar foo
757 $ hg status
750 $ hg status
758 ! foo/a
751 ! foo/a
759 ? bar/a
752 ? bar/a
760 ? foo
753 ? foo
761
754
762 $ hg status -c # incorrect output with `dirstate-v1`
755 $ hg status -c # incorrect output without the Rust implementation
763 $ hg status -cu
756 $ hg status -cu
764 ? bar/a
757 ? bar/a
765 ? foo
758 ? foo
766 $ hg status -d # incorrect output with `dirstate-v1`
759 $ hg status -d # incorrect output without the Rust implementation
767 ! foo/a
760 ! foo/a
768 $ hg status -du
761 $ hg status -du
769 ! foo/a
762 ! foo/a
770 ? bar/a
763 ? bar/a
771 ? foo
764 ? foo
772
765
773 #endif
766 #endif
774
767
775
768
776 Create a repo with files in each possible status
769 Create a repo with files in each possible status
777
770
778 $ cd ..
771 $ cd ..
779 $ hg init repo7
772 $ hg init repo7
780 $ cd repo7
773 $ cd repo7
781 $ mkdir subdir
774 $ mkdir subdir
782 $ touch clean modified deleted removed
775 $ touch clean modified deleted removed
783 $ touch subdir/clean subdir/modified subdir/deleted subdir/removed
776 $ touch subdir/clean subdir/modified subdir/deleted subdir/removed
784 $ echo ignored > .hgignore
777 $ echo ignored > .hgignore
785 $ hg ci -Aqm '#0'
778 $ hg ci -Aqm '#0'
786 $ echo 1 > modified
779 $ echo 1 > modified
787 $ echo 1 > subdir/modified
780 $ echo 1 > subdir/modified
788 $ rm deleted
781 $ rm deleted
789 $ rm subdir/deleted
782 $ rm subdir/deleted
790 $ hg rm removed
783 $ hg rm removed
791 $ hg rm subdir/removed
784 $ hg rm subdir/removed
792 $ touch unknown ignored
785 $ touch unknown ignored
793 $ touch subdir/unknown subdir/ignored
786 $ touch subdir/unknown subdir/ignored
794
787
795 Check the output
788 Check the output
796
789
797 $ hg status
790 $ hg status
798 M modified
791 M modified
799 M subdir/modified
792 M subdir/modified
800 R removed
793 R removed
801 R subdir/removed
794 R subdir/removed
802 ! deleted
795 ! deleted
803 ! subdir/deleted
796 ! subdir/deleted
804 ? subdir/unknown
797 ? subdir/unknown
805 ? unknown
798 ? unknown
806
799
807 $ hg status -mard
800 $ hg status -mard
808 M modified
801 M modified
809 M subdir/modified
802 M subdir/modified
810 R removed
803 R removed
811 R subdir/removed
804 R subdir/removed
812 ! deleted
805 ! deleted
813 ! subdir/deleted
806 ! subdir/deleted
814
807
815 $ hg status -A
808 $ hg status -A
816 M modified
809 M modified
817 M subdir/modified
810 M subdir/modified
818 R removed
811 R removed
819 R subdir/removed
812 R subdir/removed
820 ! deleted
813 ! deleted
821 ! subdir/deleted
814 ! subdir/deleted
822 ? subdir/unknown
815 ? subdir/unknown
823 ? unknown
816 ? unknown
824 I ignored
817 I ignored
825 I subdir/ignored
818 I subdir/ignored
826 C .hgignore
819 C .hgignore
827 C clean
820 C clean
828 C subdir/clean
821 C subdir/clean
829
822
830 Note: `hg status some-name` creates a patternmatcher which is not supported
823 Note: `hg status some-name` creates a patternmatcher which is not supported
831 yet by the Rust implementation of status, but includematcher is supported.
824 yet by the Rust implementation of status, but includematcher is supported.
832 --include is used below for that reason.
825 --include is used below for that reason.
833
826
834 #if unix-permissions
827 #if unix-permissions
835
828
836 Not having permission to read a directory that contains tracked files makes
829 Not having permission to read a directory that contains tracked files makes
837 status emit a warning, then behave as if the directory were empty or removed
830 status emit a warning, then behave as if the directory were empty or removed
838 entirely:
831 entirely:
839
832
840 $ chmod 0 subdir
833 $ chmod 0 subdir
841 $ hg status --include subdir
834 $ hg status --include subdir
842 subdir: Permission denied
835 subdir: Permission denied
843 R subdir/removed
836 R subdir/removed
844 ! subdir/clean
837 ! subdir/clean
845 ! subdir/deleted
838 ! subdir/deleted
846 ! subdir/modified
839 ! subdir/modified
847 $ chmod 755 subdir
840 $ chmod 755 subdir
848
841
849 #endif
842 #endif
850
843
851 Remove a directory that contains tracked files
844 Remove a directory that contains tracked files
852
845
853 $ rm -r subdir
846 $ rm -r subdir
854 $ hg status --include subdir
847 $ hg status --include subdir
855 R subdir/removed
848 R subdir/removed
856 ! subdir/clean
849 ! subdir/clean
857 ! subdir/deleted
850 ! subdir/deleted
858 ! subdir/modified
851 ! subdir/modified
859
852
860 and replace it with a file
853 and replace it with a file
861
854
862 $ touch subdir
855 $ touch subdir
863 $ hg status --include subdir
856 $ hg status --include subdir
864 R subdir/removed
857 R subdir/removed
865 ! subdir/clean
858 ! subdir/clean
866 ! subdir/deleted
859 ! subdir/deleted
867 ! subdir/modified
860 ! subdir/modified
868 ? subdir
861 ? subdir
869
862
870 Replace a deleted or removed file with a directory
863 Replace a deleted or removed file with a directory
871
864
872 $ mkdir deleted removed
865 $ mkdir deleted removed
873 $ touch deleted/1 removed/1
866 $ touch deleted/1 removed/1
874 $ hg status --include deleted --include removed
867 $ hg status --include deleted --include removed
875 R removed
868 R removed
876 ! deleted
869 ! deleted
877 ? deleted/1
870 ? deleted/1
878 ? removed/1
871 ? removed/1
879 $ hg add removed/1
872 $ hg add removed/1
880 $ hg status --include deleted --include removed
873 $ hg status --include deleted --include removed
881 A removed/1
874 A removed/1
882 R removed
875 R removed
883 ! deleted
876 ! deleted
884 ? deleted/1
877 ? deleted/1
885
878
886 Deeply nested files in an ignored directory are still listed on request
879 Deeply nested files in an ignored directory are still listed on request
887
880
888 $ echo ignored-dir >> .hgignore
881 $ echo ignored-dir >> .hgignore
889 $ mkdir ignored-dir
882 $ mkdir ignored-dir
890 $ mkdir ignored-dir/subdir
883 $ mkdir ignored-dir/subdir
891 $ touch ignored-dir/subdir/1
884 $ touch ignored-dir/subdir/1
892 $ hg status --ignored
885 $ hg status --ignored
893 I ignored
886 I ignored
894 I ignored-dir/subdir/1
887 I ignored-dir/subdir/1
895
888
896 Check using include flag while listing ignored composes correctly (issue6514)
889 Check using include flag while listing ignored composes correctly (issue6514)
897
890
898 $ cd ..
891 $ cd ..
899 $ hg init issue6514
892 $ hg init issue6514
900 $ cd issue6514
893 $ cd issue6514
901 $ mkdir ignored-folder
894 $ mkdir ignored-folder
902 $ touch A.hs B.hs C.hs ignored-folder/other.txt ignored-folder/ctest.hs
895 $ touch A.hs B.hs C.hs ignored-folder/other.txt ignored-folder/ctest.hs
903 $ cat >.hgignore <<EOF
896 $ cat >.hgignore <<EOF
904 > A.hs
897 > A.hs
905 > B.hs
898 > B.hs
906 > ignored-folder/
899 > ignored-folder/
907 > EOF
900 > EOF
908 $ hg st -i -I 're:.*\.hs$'
901 $ hg st -i -I 're:.*\.hs$'
909 I A.hs
902 I A.hs
910 I B.hs
903 I B.hs
911 I ignored-folder/ctest.hs
904 I ignored-folder/ctest.hs
912
905
913 #if dirstate-v2
906 #if rust dirstate-v2
914
907
915 Check read_dir caching
908 Check read_dir caching
916
909
917 $ cd ..
910 $ cd ..
918 $ hg init repo8
911 $ hg init repo8
919 $ cd repo8
912 $ cd repo8
920 $ mkdir subdir
913 $ mkdir subdir
921 $ touch subdir/a subdir/b
914 $ touch subdir/a subdir/b
922 $ hg ci -Aqm '#0'
915 $ hg ci -Aqm '#0'
923
916
924 The cached mtime is initially unset
917 The cached mtime is initially unset
925
918
926 $ hg debugdirstate --all --no-dates | grep '^ '
919 $ hg debugdirstate --all --no-dates | grep '^ '
927 0 -1 unset subdir
920 0 -1 unset subdir
928
921
929 It is still not set when there are unknown files
922 It is still not set when there are unknown files
930
923
931 $ touch subdir/unknown
924 $ touch subdir/unknown
932 $ hg status
925 $ hg status
933 ? subdir/unknown
926 ? subdir/unknown
934 $ hg debugdirstate --all --no-dates | grep '^ '
927 $ hg debugdirstate --all --no-dates | grep '^ '
935 0 -1 unset subdir
928 0 -1 unset subdir
936
929
937 Now the directory is eligible for caching, so its mtime is saved in the dirstate
930 Now the directory is eligible for caching, so its mtime is saved in the dirstate
938
931
939 $ rm subdir/unknown
932 $ rm subdir/unknown
940 $ hg status
933 $ hg status
941 $ hg debugdirstate --all --no-dates | grep '^ '
934 $ hg debugdirstate --all --no-dates | grep '^ '
942 0 -1 set subdir
935 0 -1 set subdir
943
936
944 This time the command should be ever so slightly faster since it does not need `read_dir("subdir")`
937 This time the command should be ever so slightly faster since it does not need `read_dir("subdir")`
945
938
946 $ hg status
939 $ hg status
947
940
948 Creating a new file changes the directory's mtime, invalidating the cache
941 Creating a new file changes the directory's mtime, invalidating the cache
949
942
950 $ touch subdir/unknown
943 $ touch subdir/unknown
951 $ hg status
944 $ hg status
952 ? subdir/unknown
945 ? subdir/unknown
953
946
954 $ rm subdir/unknown
947 $ rm subdir/unknown
955 $ hg status
948 $ hg status
956
949
957 Removing a node from the dirstate resets the cache for its parent directory
950 Removing a node from the dirstate resets the cache for its parent directory
958
951
959 $ hg forget subdir/a
952 $ hg forget subdir/a
960 $ hg debugdirstate --all --no-dates | grep '^ '
953 $ hg debugdirstate --all --no-dates | grep '^ '
961 0 -1 set subdir
954 0 -1 set subdir
962 $ hg ci -qm '#1'
955 $ hg ci -qm '#1'
963 $ hg debugdirstate --all --no-dates | grep '^ '
956 $ hg debugdirstate --all --no-dates | grep '^ '
964 0 -1 unset subdir
957 0 -1 unset subdir
965 $ hg status
958 $ hg status
966 ? subdir/a
959 ? subdir/a
967
960
968 #endif
961 #endif
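To summarize the caching behaviour exercised above (a sketch with hypothetical helpers, not the dirstate-v2 data structures): once a status run finds nothing left to report in a directory, its mtime can be recorded; later runs may skip read_dir() while that mtime is unchanged, and any file creation or deletion bumps the mtime and invalidates the entry.

    import os

    # Hypothetical cache: directory path -> mtime recorded after a scan
    # that made the directory eligible for caching.
    _dir_mtimes = {}

    def need_read_dir(dirpath):
        # A cached, unchanged mtime means the previous scan is still valid.
        return _dir_mtimes.get(dirpath) != int(os.stat(dirpath).st_mtime)

    def record_clean_scan(dirpath):
        # Creating or removing an entry later changes the directory mtime,
        # so need_read_dir() becomes True again, as in the tests above.
        _dir_mtimes[dirpath] = int(os.stat(dirpath).st_mtime)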
@@ -1,283 +1,282 b''
1 #require symlink
1 #require symlink
2
2
3 #testcases dirstate-v1 dirstate-v2
3 #testcases dirstate-v1 dirstate-v2
4
4
5 #if dirstate-v2
5 #if dirstate-v2
6 #require rust
7 $ echo '[format]' >> $HGRCPATH
6 $ echo '[format]' >> $HGRCPATH
8 $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
7 $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
9 #endif
8 #endif
10
9
11 == tests added in 0.7 ==
10 == tests added in 0.7 ==
12
11
13 $ hg init test-symlinks-0.7; cd test-symlinks-0.7;
12 $ hg init test-symlinks-0.7; cd test-symlinks-0.7;
14 $ touch foo; ln -s foo bar; ln -s nonexistent baz
13 $ touch foo; ln -s foo bar; ln -s nonexistent baz
15
14
16 import with add and addremove -- symlink walking should _not_ screw up.
15 import with add and addremove -- symlink walking should _not_ screw up.
17
16
18 $ hg add
17 $ hg add
19 adding bar
18 adding bar
20 adding baz
19 adding baz
21 adding foo
20 adding foo
22 $ hg forget bar baz foo
21 $ hg forget bar baz foo
23 $ hg addremove
22 $ hg addremove
24 adding bar
23 adding bar
25 adding baz
24 adding baz
26 adding foo
25 adding foo
27
26
28 commit -- the symlink should _not_ appear added to dir state
27 commit -- the symlink should _not_ appear added to dir state
29
28
30 $ hg commit -m 'initial'
29 $ hg commit -m 'initial'
31
30
32 $ touch bomb
31 $ touch bomb
33
32
34 again, symlink should _not_ show up on dir state
33 again, symlink should _not_ show up on dir state
35
34
36 $ hg addremove
35 $ hg addremove
37 adding bomb
36 adding bomb
38
37
39 Assert screamed here before, should go by without consequence
38 Assert screamed here before, should go by without consequence
40
39
41 $ hg commit -m 'is there a bug?'
40 $ hg commit -m 'is there a bug?'
42 $ cd ..
41 $ cd ..
43
42
44
43
45 == fifo & ignore ==
44 == fifo & ignore ==
46
45
47 $ hg init test; cd test;
46 $ hg init test; cd test;
48
47
49 $ mkdir dir
48 $ mkdir dir
50 $ touch a.c dir/a.o dir/b.o
49 $ touch a.c dir/a.o dir/b.o
51
50
52 test what happens if we want to trick hg
51 test what happens if we want to trick hg
53
52
54 $ hg commit -A -m 0
53 $ hg commit -A -m 0
55 adding a.c
54 adding a.c
56 adding dir/a.o
55 adding dir/a.o
57 adding dir/b.o
56 adding dir/b.o
58 $ echo "relglob:*.o" > .hgignore
57 $ echo "relglob:*.o" > .hgignore
59 $ rm a.c
58 $ rm a.c
60 $ rm dir/a.o
59 $ rm dir/a.o
61 $ rm dir/b.o
60 $ rm dir/b.o
62 $ mkdir dir/a.o
61 $ mkdir dir/a.o
63 $ ln -s nonexistent dir/b.o
62 $ ln -s nonexistent dir/b.o
64 $ mkfifo a.c
63 $ mkfifo a.c
65
64
66 it should show a.c, dir/a.o and dir/b.o deleted
65 it should show a.c, dir/a.o and dir/b.o deleted
67
66
68 $ hg status
67 $ hg status
69 M dir/b.o
68 M dir/b.o
70 ! a.c
69 ! a.c
71 ! dir/a.o
70 ! dir/a.o
72 ? .hgignore
71 ? .hgignore
73 $ hg status a.c
72 $ hg status a.c
74 a.c: unsupported file type (type is fifo)
73 a.c: unsupported file type (type is fifo)
75 ! a.c
74 ! a.c
76 $ cd ..
75 $ cd ..
77
76
78
77
79 == symlinks from outside the tree ==
78 == symlinks from outside the tree ==
80
79
81 test absolute path through symlink outside repo
80 test absolute path through symlink outside repo
82
81
83 $ p=`pwd`
82 $ p=`pwd`
84 $ hg init x
83 $ hg init x
85 $ ln -s x y
84 $ ln -s x y
86 $ cd x
85 $ cd x
87 $ touch f
86 $ touch f
88 $ hg add f
87 $ hg add f
89 $ hg status "$p"/y/f
88 $ hg status "$p"/y/f
90 A f
89 A f
91
90
92 try symlink outside repo to file inside
91 try symlink outside repo to file inside
93
92
94 $ ln -s x/f ../z
93 $ ln -s x/f ../z
95
94
96 this should fail
95 this should fail
97
96
98 $ hg status ../z && { echo hg mistakenly exited with status 0; exit 1; } || :
97 $ hg status ../z && { echo hg mistakenly exited with status 0; exit 1; } || :
99 abort: ../z not under root '$TESTTMP/x'
98 abort: ../z not under root '$TESTTMP/x'
100 $ cd ..
99 $ cd ..
101
100
102
101
103 == cloning symlinks ==
102 == cloning symlinks ==
104 $ hg init clone; cd clone;
103 $ hg init clone; cd clone;
105
104
106 try cloning symlink in a subdir
105 try cloning symlink in a subdir
107 1. commit a symlink
106 1. commit a symlink
108
107
109 $ mkdir -p a/b/c
108 $ mkdir -p a/b/c
110 $ cd a/b/c
109 $ cd a/b/c
111 $ ln -s /path/to/symlink/source demo
110 $ ln -s /path/to/symlink/source demo
112 $ cd ../../..
111 $ cd ../../..
113 $ hg stat
112 $ hg stat
114 ? a/b/c/demo
113 ? a/b/c/demo
115 $ hg commit -A -m 'add symlink in a/b/c subdir'
114 $ hg commit -A -m 'add symlink in a/b/c subdir'
116 adding a/b/c/demo
115 adding a/b/c/demo
117
116
118 2. clone it
117 2. clone it
119
118
120 $ cd ..
119 $ cd ..
121 $ hg clone clone clonedest
120 $ hg clone clone clonedest
122 updating to branch default
121 updating to branch default
123 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
122 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
124
123
125
124
126 == symlink and git diffs ==
125 == symlink and git diffs ==
127
126
128 git symlink diff
127 git symlink diff
129
128
130 $ cd clonedest
129 $ cd clonedest
131 $ hg diff --git -r null:tip
130 $ hg diff --git -r null:tip
132 diff --git a/a/b/c/demo b/a/b/c/demo
131 diff --git a/a/b/c/demo b/a/b/c/demo
133 new file mode 120000
132 new file mode 120000
134 --- /dev/null
133 --- /dev/null
135 +++ b/a/b/c/demo
134 +++ b/a/b/c/demo
136 @@ -0,0 +1,1 @@
135 @@ -0,0 +1,1 @@
137 +/path/to/symlink/source
136 +/path/to/symlink/source
138 \ No newline at end of file
137 \ No newline at end of file
139 $ hg export --git tip > ../sl.diff
138 $ hg export --git tip > ../sl.diff
140
139
141 import git symlink diff
140 import git symlink diff
142
141
143 $ hg rm a/b/c/demo
142 $ hg rm a/b/c/demo
144 $ hg commit -m'remove link'
143 $ hg commit -m'remove link'
145 $ hg import ../sl.diff
144 $ hg import ../sl.diff
146 applying ../sl.diff
145 applying ../sl.diff
147 $ hg diff --git -r 1:tip
146 $ hg diff --git -r 1:tip
148 diff --git a/a/b/c/demo b/a/b/c/demo
147 diff --git a/a/b/c/demo b/a/b/c/demo
149 new file mode 120000
148 new file mode 120000
150 --- /dev/null
149 --- /dev/null
151 +++ b/a/b/c/demo
150 +++ b/a/b/c/demo
152 @@ -0,0 +1,1 @@
151 @@ -0,0 +1,1 @@
153 +/path/to/symlink/source
152 +/path/to/symlink/source
154 \ No newline at end of file
153 \ No newline at end of file
155
154
156 == symlinks and addremove ==
155 == symlinks and addremove ==
157
156
158 directory moved and symlinked
157 directory moved and symlinked
159
158
160 $ mkdir foo
159 $ mkdir foo
161 $ touch foo/a
160 $ touch foo/a
162 $ hg ci -Ama
161 $ hg ci -Ama
163 adding foo/a
162 adding foo/a
164 $ mv foo bar
163 $ mv foo bar
165 $ ln -s bar foo
164 $ ln -s bar foo
166 $ hg status
165 $ hg status
167 ! foo/a
166 ! foo/a
168 ? bar/a
167 ? bar/a
169 ? foo
168 ? foo
170
169
171 now addremove should remove old files
170 now addremove should remove old files
172
171
173 $ hg addremove
172 $ hg addremove
174 adding bar/a
173 adding bar/a
175 adding foo
174 adding foo
176 removing foo/a
175 removing foo/a
177
176
178 commit and update back
177 commit and update back
179
178
180 $ hg ci -mb
179 $ hg ci -mb
181 $ hg up '.^'
180 $ hg up '.^'
182 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
181 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
183 $ hg up tip
182 $ hg up tip
184 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
183 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
185
184
186 $ cd ..
185 $ cd ..
187
186
188 == root of repository is symlinked ==
187 == root of repository is symlinked ==
189
188
190 $ hg init root
189 $ hg init root
191 $ ln -s root link
190 $ ln -s root link
192 $ cd root
191 $ cd root
193 $ echo foo > foo
192 $ echo foo > foo
194 $ hg status
193 $ hg status
195 ? foo
194 ? foo
196 $ hg status ../link
195 $ hg status ../link
197 ? foo
196 ? foo
198 $ hg add foo
197 $ hg add foo
199 $ hg cp foo "$TESTTMP/link/bar"
198 $ hg cp foo "$TESTTMP/link/bar"
200 foo has not been committed yet, so no copy data will be stored for bar.
199 foo has not been committed yet, so no copy data will be stored for bar.
201 $ cd ..
200 $ cd ..
202
201
203
202
204 $ hg init b
203 $ hg init b
205 $ cd b
204 $ cd b
206 $ ln -s nothing dangling
205 $ ln -s nothing dangling
207 $ hg commit -m 'commit symlink without adding' dangling
206 $ hg commit -m 'commit symlink without adding' dangling
208 abort: dangling: file not tracked!
207 abort: dangling: file not tracked!
209 [10]
208 [10]
210 $ hg add dangling
209 $ hg add dangling
211 $ hg commit -m 'add symlink'
210 $ hg commit -m 'add symlink'
212
211
213 $ hg tip -v
212 $ hg tip -v
214 changeset: 0:cabd88b706fc
213 changeset: 0:cabd88b706fc
215 tag: tip
214 tag: tip
216 user: test
215 user: test
217 date: Thu Jan 01 00:00:00 1970 +0000
216 date: Thu Jan 01 00:00:00 1970 +0000
218 files: dangling
217 files: dangling
219 description:
218 description:
220 add symlink
219 add symlink
221
220
222
221
223 $ hg manifest --debug
222 $ hg manifest --debug
224 2564acbe54bbbedfbf608479340b359f04597f80 644 @ dangling
223 2564acbe54bbbedfbf608479340b359f04597f80 644 @ dangling
225 $ readlink.py dangling
224 $ readlink.py dangling
226 dangling -> nothing
225 dangling -> nothing
227
226
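For reference, the '@' in the --debug listing above marks a symlink (and '*' would mark an executable); manifests store these as the flag characters 'l' and 'x'. A hedged rendering sketch (the helper name and exact spacing are illustrative, not Mercurial's code):

    def render_manifest_line(node_hex, flag, path):
        # One optional flag per file: b'l' for a symlink, b'x' for an
        # executable; --debug shows them as '@' and '*' markers.
        mode = b'755' if flag == b'x' else b'644'
        marker = {b'l': b' @', b'x': b' *'}.get(flag, b'  ')
        return b'%s %s%s %s' % (node_hex, mode, marker, path)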
228 $ rm dangling
227 $ rm dangling
229 $ ln -s void dangling
228 $ ln -s void dangling
230 $ hg commit -m 'change symlink'
229 $ hg commit -m 'change symlink'
231 $ readlink.py dangling
230 $ readlink.py dangling
232 dangling -> void
231 dangling -> void
233
232
234
233
235 modifying link
234 modifying link
236
235
237 $ rm dangling
236 $ rm dangling
238 $ ln -s empty dangling
237 $ ln -s empty dangling
239 $ readlink.py dangling
238 $ readlink.py dangling
240 dangling -> empty
239 dangling -> empty
241
240
242
241
243 reverting to rev 0:
242 reverting to rev 0:
244
243
245 $ hg revert -r 0 -a
244 $ hg revert -r 0 -a
246 reverting dangling
245 reverting dangling
247 $ readlink.py dangling
246 $ readlink.py dangling
248 dangling -> nothing
247 dangling -> nothing
249
248
250
249
251 backups:
250 backups:
252
251
253 $ readlink.py *.orig
252 $ readlink.py *.orig
254 dangling.orig -> empty
253 dangling.orig -> empty
255 $ rm *.orig
254 $ rm *.orig
256 $ hg up -C
255 $ hg up -C
257 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
256 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
258
257
259 copies
258 copies
260
259
261 $ hg cp -v dangling dangling2
260 $ hg cp -v dangling dangling2
262 copying dangling to dangling2
261 copying dangling to dangling2
263 $ hg st -Cmard
262 $ hg st -Cmard
264 A dangling2
263 A dangling2
265 dangling
264 dangling
266 $ readlink.py dangling dangling2
265 $ readlink.py dangling dangling2
267 dangling -> void
266 dangling -> void
268 dangling2 -> void
267 dangling2 -> void
269
268
270
269
271 Issue995: hg copy -A incorrectly handles symbolic links
270 Issue995: hg copy -A incorrectly handles symbolic links
272
271
273 $ hg up -C
272 $ hg up -C
274 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
273 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
275 $ mkdir dir
274 $ mkdir dir
276 $ ln -s dir dirlink
275 $ ln -s dir dirlink
277 $ hg ci -qAm 'add dirlink'
276 $ hg ci -qAm 'add dirlink'
278 $ mkdir newdir
277 $ mkdir newdir
279 $ mv dir newdir/dir
278 $ mv dir newdir/dir
280 $ mv dirlink newdir/dirlink
279 $ mv dirlink newdir/dirlink
281 $ hg mv -A dirlink newdir/dirlink
280 $ hg mv -A dirlink newdir/dirlink
282
281
283 $ cd ..
282 $ cd ..