dirstate: make sure the dirstate is loaded before the changelog (issue6303)...
marmoute - r45359:35b255e4 stable
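
This changeset adds a small prefetch_parents() helper so that callers can force
the dirstate to be read from disk before the changelog is opened; per the added
docstring, it exists to avoid a race condition. A minimal sketch of the intended
ordering, assuming a standard localrepo-style `repo` object (the calling code
shown here is illustrative, not part of this changeset):

    # hypothetical caller: touch the dirstate first, then the changelog,
    # so the two are not loaded in the racy order (issue6303)
    repo.dirstate.prefetch_parents()  # reads ._pl, forcing the dirstate load
    cl = repo.changelog               # loaded strictly after the dirstate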
@@ -1,1915 +1,1922 @@
 # dirstate.py - working directory tracking for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import collections
 import contextlib
 import errno
 import os
 import stat

 from .i18n import _
 from .node import nullid
 from .pycompat import delattr

 from hgdemandimport import tracing

 from . import (
     encoding,
     error,
     match as matchmod,
     pathutil,
     policy,
     pycompat,
     scmutil,
     sparse,
     txnutil,
     util,
 )

 from .interfaces import (
     dirstate as intdirstate,
     util as interfaceutil,
 )

 parsers = policy.importmod('parsers')
 rustmod = policy.importrust('dirstate')

 propertycache = util.propertycache
 filecache = scmutil.filecache
 _rangemask = 0x7FFFFFFF

 dirstatetuple = parsers.dirstatetuple


 class repocache(filecache):
     """filecache for files in .hg/"""

     def join(self, obj, fname):
         return obj._opener.join(fname)


 class rootcache(filecache):
     """filecache for files in the repository root"""

     def join(self, obj, fname):
         return obj._join(fname)


 def _getfsnow(vfs):
     '''Get "now" timestamp on filesystem'''
     tmpfd, tmpname = vfs.mkstemp()
     try:
         return os.fstat(tmpfd)[stat.ST_MTIME]
     finally:
         os.close(tmpfd)
         vfs.unlink(tmpname)


 @interfaceutil.implementer(intdirstate.idirstate)
 class dirstate(object):
     def __init__(self, opener, ui, root, validate, sparsematchfn):
         '''Create a new dirstate object.

         opener is an open()-like callable that can be used to open the
         dirstate file; root is the root of the directory tracked by
         the dirstate.
         '''
         self._opener = opener
         self._validate = validate
         self._root = root
         self._sparsematchfn = sparsematchfn
         # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
         # UNC path pointing to root share (issue4557)
         self._rootdir = pathutil.normasprefix(root)
         self._dirty = False
         self._lastnormaltime = 0
         self._ui = ui
         self._filecache = {}
         self._parentwriters = 0
         self._filename = b'dirstate'
         self._pendingfilename = b'%s.pending' % self._filename
         self._plchangecallbacks = {}
         self._origpl = None
         self._updatedfiles = set()
         self._mapcls = dirstatemap
         # Access and cache cwd early, so we don't access it for the first time
         # after a working-copy update caused it to not exist (accessing it then
         # raises an exception).
         self._cwd

+    def prefetch_parents(self):
+        """make sure the parents are loaded
+
+        Used to avoid a race condition.
+        """
+        self._pl
+
     @contextlib.contextmanager
     def parentchange(self):
         '''Context manager for handling dirstate parents.

         If an exception occurs in the scope of the context manager,
         the incoherent dirstate won't be written when wlock is
         released.
         '''
         self._parentwriters += 1
         yield
         # Typically we want the "undo" step of a context manager in a
         # finally block so it happens even when an exception
         # occurs. In this case, however, we only want to decrement
         # parentwriters if the code in the with statement exits
         # normally, so we don't have a try/finally here on purpose.
         self._parentwriters -= 1

     def pendingparentchange(self):
         '''Returns true if the dirstate is in the middle of a set of changes
         that modify the dirstate parent.
         '''
         return self._parentwriters > 0

     @propertycache
     def _map(self):
         """Return the dirstate contents (see documentation for dirstatemap)."""
         self._map = self._mapcls(self._ui, self._opener, self._root)
         return self._map

     @property
     def _sparsematcher(self):
         """The matcher for the sparse checkout.

         The working directory may not include every file from a manifest. The
         matcher obtained by this property will match a path if it is to be
         included in the working directory.
         """
         # TODO there is potential to cache this property. For now, the matcher
         # is resolved on every access. (But the called function does use a
         # cache to keep the lookup fast.)
         return self._sparsematchfn()

     @repocache(b'branch')
     def _branch(self):
         try:
             return self._opener.read(b"branch").strip() or b"default"
         except IOError as inst:
             if inst.errno != errno.ENOENT:
                 raise
             return b"default"

     @property
     def _pl(self):
         return self._map.parents()

     def hasdir(self, d):
         return self._map.hastrackeddir(d)

     @rootcache(b'.hgignore')
     def _ignore(self):
         files = self._ignorefiles()
         if not files:
             return matchmod.never()

         pats = [b'include:%s' % f for f in files]
         return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)

     @propertycache
     def _slash(self):
         return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'

     @propertycache
     def _checklink(self):
         return util.checklink(self._root)

     @propertycache
     def _checkexec(self):
         return util.checkexec(self._root)

     @propertycache
     def _checkcase(self):
         return not util.fscasesensitive(self._join(b'.hg'))

     def _join(self, f):
         # much faster than os.path.join()
         # it's safe because f is always a relative path
         return self._rootdir + f

     def flagfunc(self, buildfallback):
         if self._checklink and self._checkexec:

             def f(x):
                 try:
                     st = os.lstat(self._join(x))
                     if util.statislink(st):
                         return b'l'
                     if util.statisexec(st):
                         return b'x'
                 except OSError:
                     pass
                 return b''

             return f

         fallback = buildfallback()
         if self._checklink:

             def f(x):
                 if os.path.islink(self._join(x)):
                     return b'l'
                 if b'x' in fallback(x):
                     return b'x'
                 return b''

             return f
         if self._checkexec:

             def f(x):
                 if b'l' in fallback(x):
                     return b'l'
                 if util.isexec(self._join(x)):
                     return b'x'
                 return b''

             return f
         else:
             return fallback

     @propertycache
     def _cwd(self):
         # internal config: ui.forcecwd
         forcecwd = self._ui.config(b'ui', b'forcecwd')
         if forcecwd:
             return forcecwd
         return encoding.getcwd()

     def getcwd(self):
         '''Return the path from which a canonical path is calculated.

         This path should be used to resolve file patterns or to convert
         canonical paths back to file paths for display. It shouldn't be
         used to get real file paths. Use vfs functions instead.
         '''
         cwd = self._cwd
         if cwd == self._root:
             return b''
         # self._root ends with a path separator if self._root is '/' or 'C:\'
         rootsep = self._root
         if not util.endswithsep(rootsep):
             rootsep += pycompat.ossep
         if cwd.startswith(rootsep):
             return cwd[len(rootsep) :]
         else:
             # we're outside the repo. return an absolute path.
             return cwd

     def pathto(self, f, cwd=None):
         if cwd is None:
             cwd = self.getcwd()
         path = util.pathto(self._root, cwd, f)
         if self._slash:
             return util.pconvert(path)
         return path

     def __getitem__(self, key):
         '''Return the current state of key (a filename) in the dirstate.

         States are:
           n  normal
           m  needs merging
           r  marked for removal
           a  marked for addition
           ?  not tracked
         '''
         return self._map.get(key, (b"?",))[0]

     def __contains__(self, key):
         return key in self._map

     def __iter__(self):
         return iter(sorted(self._map))

     def items(self):
         return pycompat.iteritems(self._map)

     iteritems = items

     def parents(self):
         return [self._validate(p) for p in self._pl]

     def p1(self):
         return self._validate(self._pl[0])

     def p2(self):
         return self._validate(self._pl[1])

     def branch(self):
         return encoding.tolocal(self._branch)

     def setparents(self, p1, p2=nullid):
         """Set dirstate parents to p1 and p2.

         When moving from two parents to one, 'm' merged entries are
         adjusted to normal and previous copy records discarded and
         returned by the call.

         See localrepo.setparents()
         """
         if self._parentwriters == 0:
             raise ValueError(
                 b"cannot set dirstate parent outside of "
                 b"dirstate.parentchange context manager"
             )

         self._dirty = True
         oldp2 = self._pl[1]
         if self._origpl is None:
             self._origpl = self._pl
         self._map.setparents(p1, p2)
         copies = {}
         if oldp2 != nullid and p2 == nullid:
             candidatefiles = self._map.nonnormalset.union(
                 self._map.otherparentset
             )
             for f in candidatefiles:
                 s = self._map.get(f)
                 if s is None:
                     continue

                 # Discard 'm' markers when moving away from a merge state
                 if s[0] == b'm':
                     source = self._map.copymap.get(f)
                     if source:
                         copies[f] = source
                     self.normallookup(f)
                 # Also fix up otherparent markers
                 elif s[0] == b'n' and s[2] == -2:
                     source = self._map.copymap.get(f)
                     if source:
                         copies[f] = source
                     self.add(f)
         return copies

     def setbranch(self, branch):
         self.__class__._branch.set(self, encoding.fromlocal(branch))
         f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
         try:
             f.write(self._branch + b'\n')
             f.close()

             # make sure filecache has the correct stat info for _branch after
             # replacing the underlying file
             ce = self._filecache[b'_branch']
             if ce:
                 ce.refresh()
         except:  # re-raises
             f.discard()
             raise

     def invalidate(self):
         '''Causes the next access to reread the dirstate.

         This is different from localrepo.invalidatedirstate() because it always
         rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
         check whether the dirstate has changed before rereading it.'''

         for a in ("_map", "_branch", "_ignore"):
             if a in self.__dict__:
                 delattr(self, a)
         self._lastnormaltime = 0
         self._dirty = False
         self._updatedfiles.clear()
         self._parentwriters = 0
         self._origpl = None

     def copy(self, source, dest):
         """Mark dest as a copy of source. Unmark dest if source is None."""
         if source == dest:
             return
         self._dirty = True
         if source is not None:
             self._map.copymap[dest] = source
             self._updatedfiles.add(source)
             self._updatedfiles.add(dest)
         elif self._map.copymap.pop(dest, None):
             self._updatedfiles.add(dest)

     def copied(self, file):
         return self._map.copymap.get(file, None)

     def copies(self):
         return self._map.copymap

     def _addpath(self, f, state, mode, size, mtime):
         oldstate = self[f]
         if state == b'a' or oldstate == b'r':
             scmutil.checkfilename(f)
             if self._map.hastrackeddir(f):
                 raise error.Abort(
                     _(b'directory %r already in dirstate') % pycompat.bytestr(f)
                 )
             # shadows
             for d in pathutil.finddirs(f):
                 if self._map.hastrackeddir(d):
                     break
                 entry = self._map.get(d)
                 if entry is not None and entry[0] != b'r':
                     raise error.Abort(
                         _(b'file %r in dirstate clashes with %r')
                         % (pycompat.bytestr(d), pycompat.bytestr(f))
                     )
         self._dirty = True
         self._updatedfiles.add(f)
         self._map.addfile(f, oldstate, state, mode, size, mtime)

     def normal(self, f, parentfiledata=None):
         '''Mark a file normal and clean.

         parentfiledata: (mode, size, mtime) of the clean file

         parentfiledata should be computed from memory (for mode,
         size), as close as possible to the point where we
         determined the file was clean, to limit the risk of the
         file having been changed by an external process between the
         moment where the file was determined to be clean and now.'''
         if parentfiledata:
             (mode, size, mtime) = parentfiledata
         else:
             s = os.lstat(self._join(f))
             mode = s.st_mode
             size = s.st_size
             mtime = s[stat.ST_MTIME]
         self._addpath(f, b'n', mode, size & _rangemask, mtime & _rangemask)
         self._map.copymap.pop(f, None)
         if f in self._map.nonnormalset:
             self._map.nonnormalset.remove(f)
         if mtime > self._lastnormaltime:
             # Remember the most recent modification timeslot for status(),
             # to make sure we won't miss future size-preserving file content
             # modifications that happen within the same timeslot.
             self._lastnormaltime = mtime

     def normallookup(self, f):
         '''Mark a file normal, but possibly dirty.'''
         if self._pl[1] != nullid:
             # if there is a merge going on and the file was either
             # in state 'm' (-1) or coming from other parent (-2) before
             # being removed, restore that state.
             entry = self._map.get(f)
             if entry is not None:
                 if entry[0] == b'r' and entry[2] in (-1, -2):
                     source = self._map.copymap.get(f)
                     if entry[2] == -1:
                         self.merge(f)
                     elif entry[2] == -2:
                         self.otherparent(f)
                     if source:
                         self.copy(source, f)
                     return
                 if entry[0] == b'm' or entry[0] == b'n' and entry[2] == -2:
                     return
         self._addpath(f, b'n', 0, -1, -1)
         self._map.copymap.pop(f, None)

     def otherparent(self, f):
         '''Mark as coming from the other parent, always dirty.'''
         if self._pl[1] == nullid:
             raise error.Abort(
                 _(b"setting %r to other parent only allowed in merges") % f
             )
         if f in self and self[f] == b'n':
             # merge-like
             self._addpath(f, b'm', 0, -2, -1)
         else:
             # add-like
             self._addpath(f, b'n', 0, -2, -1)
         self._map.copymap.pop(f, None)

     def add(self, f):
         '''Mark a file added.'''
         self._addpath(f, b'a', 0, -1, -1)
         self._map.copymap.pop(f, None)

     def remove(self, f):
         '''Mark a file removed.'''
         self._dirty = True
         oldstate = self[f]
         size = 0
         if self._pl[1] != nullid:
             entry = self._map.get(f)
             if entry is not None:
                 # backup the previous state
                 if entry[0] == b'm':  # merge
                     size = -1
                 elif entry[0] == b'n' and entry[2] == -2:  # other parent
                     size = -2
                     self._map.otherparentset.add(f)
         self._updatedfiles.add(f)
         self._map.removefile(f, oldstate, size)
         if size == 0:
             self._map.copymap.pop(f, None)

     def merge(self, f):
         '''Mark a file merged.'''
         if self._pl[1] == nullid:
             return self.normallookup(f)
         return self.otherparent(f)

     def drop(self, f):
         '''Drop a file from the dirstate'''
         oldstate = self[f]
         if self._map.dropfile(f, oldstate):
             self._dirty = True
             self._updatedfiles.add(f)
             self._map.copymap.pop(f, None)

     def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
         if exists is None:
             exists = os.path.lexists(os.path.join(self._root, path))
         if not exists:
             # Maybe a path component exists
             if not ignoremissing and b'/' in path:
                 d, f = path.rsplit(b'/', 1)
                 d = self._normalize(d, False, ignoremissing, None)
                 folded = d + b"/" + f
             else:
                 # No path components, preserve original case
                 folded = path
         else:
             # recursively normalize leading directory components
             # against dirstate
             if b'/' in normed:
                 d, f = normed.rsplit(b'/', 1)
                 d = self._normalize(d, False, ignoremissing, True)
                 r = self._root + b"/" + d
                 folded = d + b"/" + util.fspath(f, r)
             else:
                 folded = util.fspath(normed, self._root)
             storemap[normed] = folded

         return folded

     def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
         normed = util.normcase(path)
         folded = self._map.filefoldmap.get(normed, None)
         if folded is None:
             if isknown:
                 folded = path
             else:
                 folded = self._discoverpath(
                     path, normed, ignoremissing, exists, self._map.filefoldmap
                 )
         return folded

     def _normalize(self, path, isknown, ignoremissing=False, exists=None):
         normed = util.normcase(path)
         folded = self._map.filefoldmap.get(normed, None)
         if folded is None:
             folded = self._map.dirfoldmap.get(normed, None)
         if folded is None:
             if isknown:
                 folded = path
             else:
                 # store discovered result in dirfoldmap so that future
                 # normalizefile calls don't start matching directories
                 folded = self._discoverpath(
                     path, normed, ignoremissing, exists, self._map.dirfoldmap
                 )
         return folded

     def normalize(self, path, isknown=False, ignoremissing=False):
         '''
         normalize the case of a pathname when on a casefolding filesystem

         isknown specifies whether the filename came from walking the
         disk, to avoid extra filesystem access.

         If ignoremissing is True, missing paths are returned
         unchanged. Otherwise, we try harder to normalize possibly
         existing path components.

         The normalized case is determined based on the following precedence:

         - version of name already stored in the dirstate
         - version of name stored on disk
         - version provided via command arguments
         '''

         if self._checkcase:
             return self._normalize(path, isknown, ignoremissing)
         return path

     def clear(self):
         self._map.clear()
         self._lastnormaltime = 0
         self._updatedfiles.clear()
         self._dirty = True

     def rebuild(self, parent, allfiles, changedfiles=None):
         if changedfiles is None:
             # Rebuild entire dirstate
             to_lookup = allfiles
             to_drop = []
             lastnormaltime = self._lastnormaltime
             self.clear()
             self._lastnormaltime = lastnormaltime
         elif len(changedfiles) < 10:
             # Avoid turning allfiles into a set, which can be expensive if it's
             # large.
             to_lookup = []
             to_drop = []
             for f in changedfiles:
                 if f in allfiles:
                     to_lookup.append(f)
                 else:
                     to_drop.append(f)
         else:
             changedfilesset = set(changedfiles)
             to_lookup = changedfilesset & set(allfiles)
             to_drop = changedfilesset - to_lookup

         if self._origpl is None:
             self._origpl = self._pl
         self._map.setparents(parent, nullid)

         for f in to_lookup:
             self.normallookup(f)
         for f in to_drop:
             self.drop(f)

         self._dirty = True

     def identity(self):
         '''Return identity of dirstate itself to detect changing in storage

         If identity of previous dirstate is equal to this, writing
         changes based on the former dirstate out can keep consistency.
         '''
         return self._map.identity

     def write(self, tr):
         if not self._dirty:
             return

         filename = self._filename
         if tr:
             # 'dirstate.write()' is not only for writing in-memory
             # changes out, but also for dropping ambiguous timestamp.
             # delayed writing re-raises the "ambiguous timestamp issue".
             # See also the wiki page below for detail:
             # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

             # emulate dropping timestamp in 'parsers.pack_dirstate'
             now = _getfsnow(self._opener)
             self._map.clearambiguoustimes(self._updatedfiles, now)

             # emulate that all 'dirstate.normal' results are written out
             self._lastnormaltime = 0
             self._updatedfiles.clear()

             # delay writing in-memory changes out
             tr.addfilegenerator(
                 b'dirstate',
                 (self._filename,),
                 self._writedirstate,
                 location=b'plain',
             )
             return

         st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
         self._writedirstate(st)

     def addparentchangecallback(self, category, callback):
         """add a callback to be called when the wd parents are changed

         Callback will be called with the following arguments:
             dirstate, (oldp1, oldp2), (newp1, newp2)

         Category is a unique identifier to allow overwriting an old callback
         with a newer callback.
         """
         self._plchangecallbacks[category] = callback

     def _writedirstate(self, st):
         # notify callbacks about parents change
         if self._origpl is not None and self._origpl != self._pl:
             for c, callback in sorted(
                 pycompat.iteritems(self._plchangecallbacks)
             ):
                 callback(self, self._origpl, self._pl)
             self._origpl = None
         # use the modification time of the newly created temporary file as the
         # filesystem's notion of 'now'
         now = util.fstat(st)[stat.ST_MTIME] & _rangemask

         # enough 'delaywrite' prevents 'pack_dirstate' from dropping
         # the timestamp of each entry in the dirstate, because of 'now > mtime'
703 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
710 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
704 if delaywrite > 0:
711 if delaywrite > 0:
705 # do we have any files to delay for?
712 # do we have any files to delay for?
706 for f, e in pycompat.iteritems(self._map):
713 for f, e in pycompat.iteritems(self._map):
707 if e[0] == b'n' and e[3] == now:
714 if e[0] == b'n' and e[3] == now:
708 import time # to avoid useless import
715 import time # to avoid useless import
709
716
710 # rather than sleep n seconds, sleep until the next
717 # rather than sleep n seconds, sleep until the next
711 # multiple of n seconds
718 # multiple of n seconds
712 clock = time.time()
719 clock = time.time()
713 start = int(clock) - (int(clock) % delaywrite)
720 start = int(clock) - (int(clock) % delaywrite)
714 end = start + delaywrite
721 end = start + delaywrite
715 time.sleep(end - clock)
722 time.sleep(end - clock)
716 now = end # trust our estimate that the end is near now
723 now = end # trust our estimate that the end is near now
717 break
724 break
718
725
719 self._map.write(st, now)
726 self._map.write(st, now)
720 self._lastnormaltime = 0
727 self._lastnormaltime = 0
721 self._dirty = False
728 self._dirty = False
722
729
723 def _dirignore(self, f):
730 def _dirignore(self, f):
724 if self._ignore(f):
731 if self._ignore(f):
725 return True
732 return True
726 for p in pathutil.finddirs(f):
733 for p in pathutil.finddirs(f):
727 if self._ignore(p):
734 if self._ignore(p):
728 return True
735 return True
729 return False
736 return False
730
737
731 def _ignorefiles(self):
738 def _ignorefiles(self):
732 files = []
739 files = []
733 if os.path.exists(self._join(b'.hgignore')):
740 if os.path.exists(self._join(b'.hgignore')):
734 files.append(self._join(b'.hgignore'))
741 files.append(self._join(b'.hgignore'))
735 for name, path in self._ui.configitems(b"ui"):
742 for name, path in self._ui.configitems(b"ui"):
736 if name == b'ignore' or name.startswith(b'ignore.'):
743 if name == b'ignore' or name.startswith(b'ignore.'):
737 # we need to use os.path.join here rather than self._join
744 # we need to use os.path.join here rather than self._join
738 # because path is arbitrary and user-specified
745 # because path is arbitrary and user-specified
739 files.append(os.path.join(self._rootdir, util.expandpath(path)))
746 files.append(os.path.join(self._rootdir, util.expandpath(path)))
740 return files
747 return files
741
748
742 def _ignorefileandline(self, f):
749 def _ignorefileandline(self, f):
743 files = collections.deque(self._ignorefiles())
750 files = collections.deque(self._ignorefiles())
744 visited = set()
751 visited = set()
745 while files:
752 while files:
746 i = files.popleft()
753 i = files.popleft()
747 patterns = matchmod.readpatternfile(
754 patterns = matchmod.readpatternfile(
748 i, self._ui.warn, sourceinfo=True
755 i, self._ui.warn, sourceinfo=True
749 )
756 )
750 for pattern, lineno, line in patterns:
757 for pattern, lineno, line in patterns:
751 kind, p = matchmod._patsplit(pattern, b'glob')
758 kind, p = matchmod._patsplit(pattern, b'glob')
752 if kind == b"subinclude":
759 if kind == b"subinclude":
753 if p not in visited:
760 if p not in visited:
754 files.append(p)
761 files.append(p)
755 continue
762 continue
756 m = matchmod.match(
763 m = matchmod.match(
757 self._root, b'', [], [pattern], warn=self._ui.warn
764 self._root, b'', [], [pattern], warn=self._ui.warn
758 )
765 )
759 if m(f):
766 if m(f):
760 return (i, lineno, line)
767 return (i, lineno, line)
761 visited.add(i)
768 visited.add(i)
762 return (None, -1, b"")
769 return (None, -1, b"")
763
770
764 def _walkexplicit(self, match, subrepos):
771 def _walkexplicit(self, match, subrepos):
765 '''Get stat data about the files explicitly specified by match.
772 '''Get stat data about the files explicitly specified by match.
766
773
767 Return a triple (results, dirsfound, dirsnotfound).
774 Return a triple (results, dirsfound, dirsnotfound).
768 - results is a mapping from filename to stat result. It also contains
775 - results is a mapping from filename to stat result. It also contains
769 listings mapping subrepos and .hg to None.
776 listings mapping subrepos and .hg to None.
770 - dirsfound is a list of files found to be directories.
777 - dirsfound is a list of files found to be directories.
771 - dirsnotfound is a list of files that the dirstate thinks are
778 - dirsnotfound is a list of files that the dirstate thinks are
772 directories and that were not found.'''
779 directories and that were not found.'''
773
780
774 def badtype(mode):
781 def badtype(mode):
775 kind = _(b'unknown')
782 kind = _(b'unknown')
776 if stat.S_ISCHR(mode):
783 if stat.S_ISCHR(mode):
777 kind = _(b'character device')
784 kind = _(b'character device')
778 elif stat.S_ISBLK(mode):
785 elif stat.S_ISBLK(mode):
779 kind = _(b'block device')
786 kind = _(b'block device')
780 elif stat.S_ISFIFO(mode):
787 elif stat.S_ISFIFO(mode):
781 kind = _(b'fifo')
788 kind = _(b'fifo')
782 elif stat.S_ISSOCK(mode):
789 elif stat.S_ISSOCK(mode):
783 kind = _(b'socket')
790 kind = _(b'socket')
784 elif stat.S_ISDIR(mode):
791 elif stat.S_ISDIR(mode):
785 kind = _(b'directory')
792 kind = _(b'directory')
786 return _(b'unsupported file type (type is %s)') % kind
793 return _(b'unsupported file type (type is %s)') % kind
787
794
788 badfn = match.bad
795 badfn = match.bad
789 dmap = self._map
796 dmap = self._map
790 lstat = os.lstat
797 lstat = os.lstat
791 getkind = stat.S_IFMT
798 getkind = stat.S_IFMT
792 dirkind = stat.S_IFDIR
799 dirkind = stat.S_IFDIR
793 regkind = stat.S_IFREG
800 regkind = stat.S_IFREG
794 lnkkind = stat.S_IFLNK
801 lnkkind = stat.S_IFLNK
795 join = self._join
802 join = self._join
796 dirsfound = []
803 dirsfound = []
797 foundadd = dirsfound.append
804 foundadd = dirsfound.append
798 dirsnotfound = []
805 dirsnotfound = []
799 notfoundadd = dirsnotfound.append
806 notfoundadd = dirsnotfound.append
800
807
801 if not match.isexact() and self._checkcase:
808 if not match.isexact() and self._checkcase:
802 normalize = self._normalize
809 normalize = self._normalize
803 else:
810 else:
804 normalize = None
811 normalize = None
805
812
806 files = sorted(match.files())
813 files = sorted(match.files())
807 subrepos.sort()
814 subrepos.sort()
808 i, j = 0, 0
815 i, j = 0, 0
809 while i < len(files) and j < len(subrepos):
816 while i < len(files) and j < len(subrepos):
810 subpath = subrepos[j] + b"/"
817 subpath = subrepos[j] + b"/"
811 if files[i] < subpath:
818 if files[i] < subpath:
812 i += 1
819 i += 1
813 continue
820 continue
814 while i < len(files) and files[i].startswith(subpath):
821 while i < len(files) and files[i].startswith(subpath):
815 del files[i]
822 del files[i]
816 j += 1
823 j += 1
817
824
818 if not files or b'' in files:
825 if not files or b'' in files:
819 files = [b'']
826 files = [b'']
820 # constructing the foldmap is expensive, so don't do it for the
827 # constructing the foldmap is expensive, so don't do it for the
821 # common case where files is ['']
828 # common case where files is ['']
822 normalize = None
829 normalize = None
823 results = dict.fromkeys(subrepos)
830 results = dict.fromkeys(subrepos)
824 results[b'.hg'] = None
831 results[b'.hg'] = None
825
832
826 for ff in files:
833 for ff in files:
827 if normalize:
834 if normalize:
828 nf = normalize(ff, False, True)
835 nf = normalize(ff, False, True)
829 else:
836 else:
830 nf = ff
837 nf = ff
831 if nf in results:
838 if nf in results:
832 continue
839 continue
833
840
834 try:
841 try:
835 st = lstat(join(nf))
842 st = lstat(join(nf))
836 kind = getkind(st.st_mode)
843 kind = getkind(st.st_mode)
837 if kind == dirkind:
844 if kind == dirkind:
838 if nf in dmap:
845 if nf in dmap:
839 # file replaced by dir on disk but still in dirstate
846 # file replaced by dir on disk but still in dirstate
840 results[nf] = None
847 results[nf] = None
841 foundadd((nf, ff))
848 foundadd((nf, ff))
842 elif kind == regkind or kind == lnkkind:
849 elif kind == regkind or kind == lnkkind:
843 results[nf] = st
850 results[nf] = st
844 else:
851 else:
845 badfn(ff, badtype(kind))
852 badfn(ff, badtype(kind))
846 if nf in dmap:
853 if nf in dmap:
847 results[nf] = None
854 results[nf] = None
848 except OSError as inst: # nf not found on disk - it is dirstate only
855 except OSError as inst: # nf not found on disk - it is dirstate only
849 if nf in dmap: # does it exactly match a missing file?
856 if nf in dmap: # does it exactly match a missing file?
850 results[nf] = None
857 results[nf] = None
851 else: # does it match a missing directory?
858 else: # does it match a missing directory?
852 if self._map.hasdir(nf):
859 if self._map.hasdir(nf):
853 notfoundadd(nf)
860 notfoundadd(nf)
854 else:
861 else:
855 badfn(ff, encoding.strtolocal(inst.strerror))
862 badfn(ff, encoding.strtolocal(inst.strerror))
856
863
857 # match.files() may contain explicitly-specified paths that shouldn't
864 # match.files() may contain explicitly-specified paths that shouldn't
858 # be taken; drop them from the list of files found. dirsfound/notfound
865 # be taken; drop them from the list of files found. dirsfound/notfound
859 # aren't filtered here because they will be tested later.
866 # aren't filtered here because they will be tested later.
860 if match.anypats():
867 if match.anypats():
861 for f in list(results):
868 for f in list(results):
862 if f == b'.hg' or f in subrepos:
869 if f == b'.hg' or f in subrepos:
863 # keep sentinel to disable further out-of-repo walks
870 # keep sentinel to disable further out-of-repo walks
864 continue
871 continue
865 if not match(f):
872 if not match(f):
866 del results[f]
873 del results[f]
867
874
868 # Case insensitive filesystems cannot rely on lstat() failing to detect
875 # Case insensitive filesystems cannot rely on lstat() failing to detect
869 # a case-only rename. Prune the stat object for any file that does not
876 # a case-only rename. Prune the stat object for any file that does not
870 # match the case in the filesystem, if there are multiple files that
877 # match the case in the filesystem, if there are multiple files that
871 # normalize to the same path.
878 # normalize to the same path.
872 if match.isexact() and self._checkcase:
879 if match.isexact() and self._checkcase:
873 normed = {}
880 normed = {}
874
881
875 for f, st in pycompat.iteritems(results):
882 for f, st in pycompat.iteritems(results):
876 if st is None:
883 if st is None:
877 continue
884 continue
878
885
879 nc = util.normcase(f)
886 nc = util.normcase(f)
880 paths = normed.get(nc)
887 paths = normed.get(nc)
881
888
882 if paths is None:
889 if paths is None:
883 paths = set()
890 paths = set()
884 normed[nc] = paths
891 normed[nc] = paths
885
892
886 paths.add(f)
893 paths.add(f)
887
894
888 for norm, paths in pycompat.iteritems(normed):
895 for norm, paths in pycompat.iteritems(normed):
889 if len(paths) > 1:
896 if len(paths) > 1:
890 for path in paths:
897 for path in paths:
891 folded = self._discoverpath(
898 folded = self._discoverpath(
892 path, norm, True, None, self._map.dirfoldmap
899 path, norm, True, None, self._map.dirfoldmap
893 )
900 )
894 if path != folded:
901 if path != folded:
895 results[path] = None
902 results[path] = None
896
903
897 return results, dirsfound, dirsnotfound
904 return results, dirsfound, dirsnotfound
898
905
899 def walk(self, match, subrepos, unknown, ignored, full=True):
906 def walk(self, match, subrepos, unknown, ignored, full=True):
900 '''
907 '''
901 Walk recursively through the directory tree, finding all files
908 Walk recursively through the directory tree, finding all files
902 matched by match.
909 matched by match.
903
910
904 If full is False, maybe skip some known-clean files.
911 If full is False, maybe skip some known-clean files.
905
912
906 Return a dict mapping filename to stat-like object (either
913 Return a dict mapping filename to stat-like object (either
907 mercurial.osutil.stat instance or return value of os.stat()).
914 mercurial.osutil.stat instance or return value of os.stat()).
908
915
909 '''
916 '''
910 # full is a flag that extensions that hook into walk can use -- this
917 # full is a flag that extensions that hook into walk can use -- this
911 # implementation doesn't use it at all. This satisfies the contract
918 # implementation doesn't use it at all. This satisfies the contract
912 # because we only guarantee a "maybe".
919 # because we only guarantee a "maybe".
913
920
914 if ignored:
921 if ignored:
915 ignore = util.never
922 ignore = util.never
916 dirignore = util.never
923 dirignore = util.never
917 elif unknown:
924 elif unknown:
918 ignore = self._ignore
925 ignore = self._ignore
919 dirignore = self._dirignore
926 dirignore = self._dirignore
920 else:
927 else:
921 # if not unknown and not ignored, drop dir recursion and step 2
928 # if not unknown and not ignored, drop dir recursion and step 2
922 ignore = util.always
929 ignore = util.always
923 dirignore = util.always
930 dirignore = util.always
924
931
925 matchfn = match.matchfn
932 matchfn = match.matchfn
926 matchalways = match.always()
933 matchalways = match.always()
927 matchtdir = match.traversedir
934 matchtdir = match.traversedir
928 dmap = self._map
935 dmap = self._map
929 listdir = util.listdir
936 listdir = util.listdir
930 lstat = os.lstat
937 lstat = os.lstat
931 dirkind = stat.S_IFDIR
938 dirkind = stat.S_IFDIR
932 regkind = stat.S_IFREG
939 regkind = stat.S_IFREG
933 lnkkind = stat.S_IFLNK
940 lnkkind = stat.S_IFLNK
934 join = self._join
941 join = self._join
935
942
936 exact = skipstep3 = False
943 exact = skipstep3 = False
937 if match.isexact(): # match.exact
944 if match.isexact(): # match.exact
938 exact = True
945 exact = True
939 dirignore = util.always # skip step 2
946 dirignore = util.always # skip step 2
940 elif match.prefix(): # match.match, no patterns
947 elif match.prefix(): # match.match, no patterns
941 skipstep3 = True
948 skipstep3 = True
942
949
943 if not exact and self._checkcase:
950 if not exact and self._checkcase:
944 normalize = self._normalize
951 normalize = self._normalize
945 normalizefile = self._normalizefile
952 normalizefile = self._normalizefile
946 skipstep3 = False
953 skipstep3 = False
947 else:
954 else:
948 normalize = self._normalize
955 normalize = self._normalize
949 normalizefile = None
956 normalizefile = None
950
957
951 # step 1: find all explicit files
958 # step 1: find all explicit files
952 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
959 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
953 if matchtdir:
960 if matchtdir:
954 for d in work:
961 for d in work:
955 matchtdir(d[0])
962 matchtdir(d[0])
956 for d in dirsnotfound:
963 for d in dirsnotfound:
957 matchtdir(d)
964 matchtdir(d)
958
965
959 skipstep3 = skipstep3 and not (work or dirsnotfound)
966 skipstep3 = skipstep3 and not (work or dirsnotfound)
960 work = [d for d in work if not dirignore(d[0])]
967 work = [d for d in work if not dirignore(d[0])]
961
968
        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that traverse() doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn, b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that weren't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
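
    # Illustrative sketch (not part of the original module): one way a
    # caller might consume the mapping returned by walk(). Keys are
    # repo-relative filenames; values are stat results for paths found on
    # disk, or None for entries that are missing, case-collided, or only
    # known from the dmap. The loop below is hypothetical.
    #
    #   for fn, st in sorted(self.walk(match, (), True, False).items()):
    #       if st is None:
    #           self._ui.write(b'%s: no usable stat\n' % fn)
    #       else:
    #           self._ui.write(b'%s: %d bytes\n' % (fn, st.st_size))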

    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
        ) = rustmod.status(
            self._map._rustmap,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
        )
        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)

    def status(self, match, subrepos, ignored, clean, unknown):
        '''Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        '''
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif match.traversedir is not None:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matcher types have yet to be implemented in Rust
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                pass

        def noop(f):
            pass

        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
            state = t[0]
            mode = t[1]
            size = t[2]
            time = t[3]

            if not st and state in b"nma":
                dadd(fn)
            elif state == b'n':
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or size == -2  # other parent
                    or fn in copymap
                ):
                    madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
            elif state == b'm':
                madd(fn)
            elif state == b'a':
                aadd(fn)
            elif state == b'r':
                radd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)
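
    # Illustrative sketch (hypothetical caller code): interpreting the
    # (unsure, status) pair. Files in `unsure` have matching size but a
    # differing or racy mtime, so the caller (e.g. workingctx in context.py)
    # must compare their contents against the parent revision to classify
    # them; this method deliberately leaves that expensive step to callers.
    #
    #   unsure, st = dirstate.status(match, (), False, False, True)
    #   for fn in unsure:
    #       compare_against_p1(fn)  # hypothetical content comparison
    #   ui.write(b'%d modified, %d unknown\n'
    #            % (len(st.modified), len(st.unknown)))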

    def matches(self, match):
        '''
        return files in the dirstate (in whatever state) filtered by match
        '''
        dmap = self._map
        if rustmod is not None:
            dmap = self._map._rustmap

        if match.always():
            return dmap.keys()
        files = match.files()
        if match.isexact():
            # fast path -- filter the other way around, since typically files is
            # much smaller than dmap
            return [f for f in files if f in dmap]
        if match.prefix() and all(fn in dmap for fn in files):
            # fast path -- all the values are known to be files, so just return
            # that
            return list(files)
        return [f for f in dmap if match(f)]
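
    # Illustrative sketch of the two fast paths above (matcher construction
    # shown schematically): an exact matcher filters its usually-small file
    # list against dmap, while an always-matcher returns the whole key set
    # without any per-file matching.
    #
    #   dirstate.matches(matchmod.always())            # -> dmap.keys()
    #   dirstate.matches(matchmod.exact([b'a/b.c']))   # -> [b'a/b.c'] if tracked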

    def _actualfilename(self, tr):
        if tr:
            return self._pendingfilename
        else:
            return self._filename

    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file'''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to make certain the changes
        # are written out, because the latter skips writing while a
        # transaction is running. The output file is used to create the
        # backup of the dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                self._opener(filename, b"w", atomictemp=True, checkambig=True)
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                self._writedirstate,
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )
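
    # Illustrative sketch (hypothetical caller, hypothetical backup name):
    # the round-trip these three backup methods are designed for. A risky
    # working-copy operation is bracketed by savebackup/clearbackup, with
    # restorebackup on the failure path.
    #
    #   self.savebackup(tr, b'dirstate.backup')
    #   try:
    #       risky_working_copy_update()
    #       self.clearbackup(tr, b'dirstate.backup')
    #   except Exception:
    #       self.restorebackup(tr, b'dirstate.backup')
    #       raise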

    def restorebackup(self, tr, backupname):
        '''Restore dirstate from backup file'''
        # this "invalidate()" prevents "wlock.release()" from writing
        # changes of dirstate out after restoring from backup file
        self.invalidate()
        filename = self._actualfilename(tr)
        o = self._opener
        if util.samefile(o.join(backupname), o.join(filename)):
            o.unlink(backupname)
        else:
            o.rename(backupname, filename, checkambig=True)

    def clearbackup(self, tr, backupname):
        '''Clear backup file'''
        self._opener.unlink(backupname)


class dirstatemap(object):
    """Map encapsulating the dirstate's contents.

    The dirstate contains the following state:

    - `identity` is the identity of the dirstate file, which can be used to
      detect when changes have occurred to the dirstate file.

    - `parents` is a pair containing the parents of the working copy. The
      parents are updated by calling `setparents`.

    - the state map maps filenames to tuples of (state, mode, size, mtime),
      where state is a single character representing 'normal', 'added',
      'removed', or 'merged'. It is read by treating the dirstate as a
      dict. File state is updated by calling the `addfile`, `removefile` and
      `dropfile` methods.

    - `copymap` maps destination filenames to their source filename.

    The dirstate also provides the following views onto the state:

    - `nonnormalset` is a set of the filenames that have state other
      than 'normal', or are normal but have an mtime of -1 ('normallookup').

    - `otherparentset` is a set of the filenames that are marked as coming
      from the second parent when the dirstate is currently being merged.

    - `filefoldmap` is a dict mapping normalized filenames to the denormalized
      form that they appear as in the dirstate.

    - `dirfoldmap` is a dict mapping normalized directory names to the
      denormalized form that they appear as in the dirstate.
    """
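
    # Illustrative example (made-up path and numbers): a single state-map
    # entry for a clean tracked file is a (state, mode, size, mtime) tuple,
    # e.g.
    #
    #   dmap[b'src/main.py'] == (b'n', 0o100644, 1024, 1589000000)
    #
    # The sentinels -1 (mtime: force a content lookup) and -2 (size: entry
    # comes from the other merge parent) are what feed `nonnormalset` and
    # `otherparentset` described above.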

    def __init__(self, ui, opener, root):
        self._ui = ui
        self._opener = opener
        self._root = root
        self._filename = b'dirstate'

        self._parents = None
        self._dirtyparents = False

        # for consistent view between _pl() and _read() invocations
        self._pendingmode = None

    @propertycache
    def _map(self):
        self._map = {}
        self.read()
        return self._map

    @propertycache
    def copymap(self):
        self.copymap = {}
        self._map
        return self.copymap

    def clear(self):
        self._map.clear()
        self.copymap.clear()
        self.setparents(nullid, nullid)
        util.clearcachedproperty(self, b"_dirs")
        util.clearcachedproperty(self, b"_alldirs")
        util.clearcachedproperty(self, b"filefoldmap")
        util.clearcachedproperty(self, b"dirfoldmap")
        util.clearcachedproperty(self, b"nonnormalset")
        util.clearcachedproperty(self, b"otherparentset")

    def items(self):
        return pycompat.iteritems(self._map)

    # forward for Python 2/3 compat
    iteritems = items

    def __len__(self):
        return len(self._map)

    def __iter__(self):
        return iter(self._map)

    def get(self, key, default=None):
        return self._map.get(key, default)

    def __contains__(self, key):
        return key in self._map

    def __getitem__(self, key):
        return self._map[key]

    def keys(self):
        return self._map.keys()

    def preload(self):
        """Loads the underlying data, if it's not already loaded"""
        self._map

    def addfile(self, f, oldstate, state, mode, size, mtime):
        """Add a tracked file to the dirstate."""
        if oldstate in b"?r" and "_dirs" in self.__dict__:
            self._dirs.addpath(f)
        if oldstate == b"?" and "_alldirs" in self.__dict__:
            self._alldirs.addpath(f)
        self._map[f] = dirstatetuple(state, mode, size, mtime)
        if state != b'n' or mtime == -1:
            self.nonnormalset.add(f)
        if size == -2:
            self.otherparentset.add(f)

    def removefile(self, f, oldstate, size):
        """
        Mark a file as removed in the dirstate.

        The `size` parameter is used to store sentinel values that indicate
        the file's previous state. In the future, we should refactor this
        to be more explicit about what that state is.
        """
        if oldstate not in b"?r" and "_dirs" in self.__dict__:
            self._dirs.delpath(f)
        if oldstate == b"?" and "_alldirs" in self.__dict__:
            self._alldirs.addpath(f)
        if "filefoldmap" in self.__dict__:
            normed = util.normcase(f)
            self.filefoldmap.pop(normed, None)
        self._map[f] = dirstatetuple(b'r', 0, size, 0)
        self.nonnormalset.add(f)
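
    # Illustrative sketch of the `size` sentinels mentioned in the
    # docstring above, as this file's conventions suggest (treat the exact
    # values as an assumption; the authoritative call sites live in the
    # dirstate class):
    #
    #   dmap.removefile(f, oldstate, 0)     # plain removal
    #   dmap.removefile(f, oldstate, -1)    # file was in the 'm' (merged) state
    #   dmap.removefile(f, oldstate, -2)    # file came from the other parent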

    def dropfile(self, f, oldstate):
        """
        Remove a file from the dirstate.  Returns True if the file was
        previously recorded.
        """
        exists = self._map.pop(f, None) is not None
        if exists:
            if oldstate != b"r" and "_dirs" in self.__dict__:
                self._dirs.delpath(f)
            if "_alldirs" in self.__dict__:
                self._alldirs.delpath(f)
        if "filefoldmap" in self.__dict__:
            normed = util.normcase(f)
            self.filefoldmap.pop(normed, None)
        self.nonnormalset.discard(f)
        return exists

    def clearambiguoustimes(self, files, now):
        for f in files:
            e = self.get(f)
            if e is not None and e[0] == b'n' and e[3] == now:
                self._map[f] = dirstatetuple(e[0], e[1], e[2], -1)
                self.nonnormalset.add(f)

    def nonnormalentries(self):
        '''Compute the nonnormal dirstate entries from the dmap'''
        try:
            return parsers.nonnormalotherparententries(self._map)
        except AttributeError:
            nonnorm = set()
            otherparent = set()
            for fname, e in pycompat.iteritems(self._map):
                if e[0] != b'n' or e[3] == -1:
                    nonnorm.add(fname)
                if e[0] == b'n' and e[2] == -2:
                    otherparent.add(fname)
            return nonnorm, otherparent
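
    # Worked example of the pure-Python fallback above, with hypothetical
    # entries:
    #
    #   self._map = {b'a': (b'n', 0, 3, 7),     # clean -> neither set
    #                b'b': (b'a', 0, 0, -1),    # added -> nonnorm
    #                b'c': (b'n', 0, -2, 5)}    # size -2 -> otherparent
    #
    # yields nonnorm == {b'b'} and otherparent == {b'c'}.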

    @propertycache
    def filefoldmap(self):
        """Returns a dictionary mapping normalized case paths to their
        non-normalized versions.
        """
        try:
            makefilefoldmap = parsers.make_file_foldmap
        except AttributeError:
            pass
        else:
            return makefilefoldmap(
                self._map, util.normcasespec, util.normcasefallback
            )

        f = {}
        normcase = util.normcase
        for name, s in pycompat.iteritems(self._map):
            if s[0] != b'r':
                f[normcase(name)] = name
        f[b'.'] = b'.'  # prevents useless util.fspath() invocation
        return f
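
    # Illustrative sketch (hypothetical names): the fold map answers
    # case-insensitive lookups by mapping the normalized spelling to the
    # spelling actually recorded in the dirstate; removed ('r') entries are
    # skipped by the pure-Python fallback above.
    #
    #   dmap.filefoldmap[util.normcase(b'README.TXT')]  # -> b'ReadMe.txt'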

    def hastrackeddir(self, d):
        """
        Returns True if the dirstate contains a tracked (not removed) file
        in this directory.
        """
        return d in self._dirs

    def hasdir(self, d):
        """
        Returns True if the dirstate contains a file (tracked or removed)
        in this directory.
        """
        return d in self._alldirs

    @propertycache
    def _dirs(self):
        return pathutil.dirs(self._map, b'r')

    @propertycache
    def _alldirs(self):
        return pathutil.dirs(self._map)

    def _opendirstatefile(self):
        fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
        if self._pendingmode is not None and self._pendingmode != mode:
            fp.close()
            raise error.Abort(
                _(b'working directory state may be changed in parallel')
            )
        self._pendingmode = mode
        return fp

    def parents(self):
        if not self._parents:
            try:
                fp = self._opendirstatefile()
                st = fp.read(40)
                fp.close()
            except IOError as err:
                if err.errno != errno.ENOENT:
                    raise
                # File doesn't exist, so the current state is empty
                st = b''

            l = len(st)
            if l == 40:
                self._parents = (st[:20], st[20:40])
            elif l == 0:
                self._parents = (nullid, nullid)
            else:
                raise error.Abort(
                    _(b'working directory state appears damaged!')
                )

        return self._parents
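
    # Illustrative sketch of the on-disk layout parsed above: the dirstate
    # file starts with the two parent nodes as 20 raw bytes each, so the
    # first 40 bytes fully determine the parents; a missing or empty file
    # means both parents are nullid.
    #
    #   st = fp.read(40)
    #   p1, p2 = st[:20], st[20:40]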

    def setparents(self, p1, p2):
        self._parents = (p1, p2)
        self._dirtyparents = True

    def read(self):
        # ignore HG_PENDING because identity is used only for writing
        self.identity = util.filestat.frompath(
            self._opener.join(self._filename)
        )

        try:
            fp = self._opendirstatefile()
            try:
                st = fp.read()
            finally:
                fp.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            return
        if not st:
            return

        if util.safehasattr(parsers, b'dict_new_presized'):
            # Make an estimate of the number of files in the dirstate based on
            # its size. From a linear regression on a set of real-world repos,
            # all over 10,000 files, the size of a dirstate entry is 85
            # bytes. The cost of resizing is significantly higher than the cost
            # of filling in a larger presized dict, so subtract 20% from the
            # size.
            #
            # This heuristic is imperfect in many ways, so in a future dirstate
            # format update it makes sense to just record the number of entries
            # on write.
            self._map = parsers.dict_new_presized(len(st) // 71)

        # Python's garbage collector triggers a GC each time a certain number
        # of container objects (the number being defined by
        # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
        # for each file in the dirstate. The C version then immediately marks
        # them as not to be tracked by the collector. However, this has no
        # effect on when GCs are triggered, only on what objects the GC looks
        # into. This means that O(number of files) GCs are unavoidable.
        # Depending on when in the process's lifetime the dirstate is parsed,
        # this can get very expensive. As a workaround, disable GC while
        # parsing the dirstate.
        #
        # (we cannot decorate the function directly since it is in a C module)
        parse_dirstate = util.nogc(parsers.parse_dirstate)
        p = parse_dirstate(self._map, self.copymap, st)
        if not self._dirtyparents:
            self.setparents(*p)

        # Avoid excess attribute lookups by fast pathing certain checks
        self.__contains__ = self._map.__contains__
        self.__getitem__ = self._map.__getitem__
        self.get = self._map.get
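
    # Worked example of the presizing heuristic above (numbers taken from
    # the comment, not re-measured): ~85 bytes per entry, padded by ~20%
    # to avoid rehashing, gives a divisor of about 85 / 1.2 ~= 71, hence
    # `len(st) // 71`. A 7.1 MB dirstate is therefore presized for roughly
    # 100,000 entries.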

    def write(self, st, now):
        st.write(
            parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
        )
        st.close()
        self._dirtyparents = False
        self.nonnormalset, self.otherparentset = self.nonnormalentries()

    @propertycache
    def nonnormalset(self):
        nonnorm, otherparents = self.nonnormalentries()
        self.otherparentset = otherparents
        return nonnorm

    @propertycache
    def otherparentset(self):
        nonnorm, otherparents = self.nonnormalentries()
        self.nonnormalset = nonnorm
        return otherparents

    @propertycache
    def identity(self):
        self._map
        return self.identity

    @propertycache
    def dirfoldmap(self):
        f = {}
        normcase = util.normcase
        for name in self._dirs:
            f[normcase(name)] = name
        return f


if rustmod is not None:

    class dirstatemap(object):
        def __init__(self, ui, opener, root):
            self._ui = ui
            self._opener = opener
            self._root = root
            self._filename = b'dirstate'
            self._parents = None
            self._dirtyparents = False

            # for consistent view between _pl() and _read() invocations
            self._pendingmode = None

        def addfile(self, *args, **kwargs):
            return self._rustmap.addfile(*args, **kwargs)

        def removefile(self, *args, **kwargs):
            return self._rustmap.removefile(*args, **kwargs)

        def dropfile(self, *args, **kwargs):
            return self._rustmap.dropfile(*args, **kwargs)

        def clearambiguoustimes(self, *args, **kwargs):
            return self._rustmap.clearambiguoustimes(*args, **kwargs)

        def nonnormalentries(self):
            return self._rustmap.nonnormalentries()

        def get(self, *args, **kwargs):
            return self._rustmap.get(*args, **kwargs)

        @propertycache
        def _rustmap(self):
            """
            Fills the Dirstatemap when called.
            Use `self._inner_rustmap` if reading the dirstate is not necessary.
            """
            self._rustmap = self._inner_rustmap
            self.read()
            return self._rustmap

        @propertycache
        def _inner_rustmap(self):
            """
            Does not fill the Dirstatemap when called. This allows for
            optimizations where only setting/getting the parents is needed.
            """
            self._inner_rustmap = rustmod.DirstateMap(self._root)
            return self._inner_rustmap
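
        # Illustrative sketch of the two-level laziness above: parent-only
        # operations touch `_inner_rustmap` (cheap, no dirstate parse),
        # while any file lookup goes through `_rustmap`, which triggers a
        # full read(). Roughly:
        #
        #   dmap.parents()         # _inner_rustmap only, parses 40 bytes
        #   b'some/file' in dmap   # materializes _rustmap via read()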

        @property
        def copymap(self):
            return self._rustmap.copymap()

        def preload(self):
            self._rustmap

        def clear(self):
            self._rustmap.clear()
            self._inner_rustmap.clear()
            self.setparents(nullid, nullid)
            util.clearcachedproperty(self, b"_dirs")
            util.clearcachedproperty(self, b"_alldirs")
            util.clearcachedproperty(self, b"dirfoldmap")

        def items(self):
            return self._rustmap.items()

        def keys(self):
            return iter(self._rustmap)

        def __contains__(self, key):
            return key in self._rustmap

        def __getitem__(self, item):
            return self._rustmap[item]

        def __len__(self):
            return len(self._rustmap)

        def __iter__(self):
            return iter(self._rustmap)

        # forward for Python 2/3 compat
        iteritems = items

        def _opendirstatefile(self):
            fp, mode = txnutil.trypending(
                self._root, self._opener, self._filename
            )
            if self._pendingmode is not None and self._pendingmode != mode:
                fp.close()
                raise error.Abort(
                    _(b'working directory state may be changed in parallel')
                )
            self._pendingmode = mode
            return fp

        def setparents(self, p1, p2):
            self._rustmap.setparents(p1, p2)
            self._parents = (p1, p2)
            self._dirtyparents = True

        def parents(self):
            if not self._parents:
                try:
                    fp = self._opendirstatefile()
                    st = fp.read(40)
                    fp.close()
                except IOError as err:
                    if err.errno != errno.ENOENT:
                        raise
                    # File doesn't exist, so the current state is empty
                    st = b''

                try:
                    self._parents = self._inner_rustmap.parents(st)
                except ValueError:
                    raise error.Abort(
                        _(b'working directory state appears damaged!')
                    )

            return self._parents

        def read(self):
            # ignore HG_PENDING because identity is used only for writing
            self.identity = util.filestat.frompath(
                self._opener.join(self._filename)
            )

            try:
                fp = self._opendirstatefile()
                try:
                    st = fp.read()
                finally:
                    fp.close()
            except IOError as err:
                if err.errno != errno.ENOENT:
                    raise
                return
            if not st:
                return

            parse_dirstate = util.nogc(self._rustmap.read)
            parents = parse_dirstate(st)
            if parents and not self._dirtyparents:
                self.setparents(*parents)

            self.__contains__ = self._rustmap.__contains__
            self.__getitem__ = self._rustmap.__getitem__
            self.get = self._rustmap.get

        def write(self, st, now):
            parents = self.parents()
            st.write(self._rustmap.write(parents[0], parents[1], now))
            st.close()
            self._dirtyparents = False

        @propertycache
        def filefoldmap(self):
            """Returns a dictionary mapping normalized case paths to their
            non-normalized versions.
            """
            return self._rustmap.filefoldmapasdict()

        def hastrackeddir(self, d):
            self._dirs  # Trigger Python's propertycache
            return self._rustmap.hastrackeddir(d)

        def hasdir(self, d):
            self._dirs  # Trigger Python's propertycache
            return self._rustmap.hasdir(d)

        @propertycache
        def _dirs(self):
            return self._rustmap.getdirs()

        @propertycache
        def _alldirs(self):
            return self._rustmap.getalldirs()

        @propertycache
        def identity(self):
            self._rustmap
            return self.identity

        @property
        def nonnormalset(self):
            nonnorm = self._rustmap.non_normal_entries()
            return nonnorm

        @propertycache
        def otherparentset(self):
            otherparents = self._rustmap.other_parent_entries()
            return otherparents

        @propertycache
        def dirfoldmap(self):
            f = {}
            normcase = util.normcase
            for name in self._dirs:
                f[normcase(name)] = name
            return f
@@ -1,3812 +1,3814 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import random
12 import random
13 import sys
13 import sys
14 import time
14 import time
15 import weakref
15 import weakref
16
16
17 from .i18n import _
17 from .i18n import _
18 from .node import (
18 from .node import (
19 bin,
19 bin,
20 hex,
20 hex,
21 nullid,
21 nullid,
22 nullrev,
22 nullrev,
23 short,
23 short,
24 )
24 )
25 from .pycompat import (
25 from .pycompat import (
26 delattr,
26 delattr,
27 getattr,
27 getattr,
28 )
28 )
29 from . import (
29 from . import (
30 bookmarks,
30 bookmarks,
31 branchmap,
31 branchmap,
32 bundle2,
32 bundle2,
33 changegroup,
33 changegroup,
34 color,
34 color,
35 context,
35 context,
36 dirstate,
36 dirstate,
37 dirstateguard,
37 dirstateguard,
38 discovery,
38 discovery,
39 encoding,
39 encoding,
40 error,
40 error,
41 exchange,
41 exchange,
42 extensions,
42 extensions,
43 filelog,
43 filelog,
44 hook,
44 hook,
45 lock as lockmod,
45 lock as lockmod,
46 match as matchmod,
46 match as matchmod,
47 merge as mergemod,
47 merge as mergemod,
48 mergeutil,
48 mergeutil,
49 namespaces,
49 namespaces,
50 narrowspec,
50 narrowspec,
51 obsolete,
51 obsolete,
52 pathutil,
52 pathutil,
53 phases,
53 phases,
54 pushkey,
54 pushkey,
55 pycompat,
55 pycompat,
56 rcutil,
56 rcutil,
57 repoview,
57 repoview,
58 revset,
58 revset,
59 revsetlang,
59 revsetlang,
60 scmutil,
60 scmutil,
61 sparse,
61 sparse,
62 store as storemod,
62 store as storemod,
63 subrepoutil,
63 subrepoutil,
64 tags as tagsmod,
64 tags as tagsmod,
65 transaction,
65 transaction,
66 txnutil,
66 txnutil,
67 util,
67 util,
68 vfs as vfsmod,
68 vfs as vfsmod,
69 )
69 )
70
70
71 from .interfaces import (
71 from .interfaces import (
72 repository,
72 repository,
73 util as interfaceutil,
73 util as interfaceutil,
74 )
74 )
75
75
76 from .utils import (
76 from .utils import (
77 hashutil,
77 hashutil,
78 procutil,
78 procutil,
79 stringutil,
79 stringutil,
80 )
80 )
81
81
82 from .revlogutils import constants as revlogconst
82 from .revlogutils import constants as revlogconst
83
83
84 release = lockmod.release
84 release = lockmod.release
85 urlerr = util.urlerr
85 urlerr = util.urlerr
86 urlreq = util.urlreq
86 urlreq = util.urlreq
87
87
88 # set of (path, vfs-location) tuples. vfs-location is:
88 # set of (path, vfs-location) tuples. vfs-location is:
89 # - 'plain' for vfs relative paths
89 # - 'plain' for vfs relative paths
90 # - '' for svfs relative paths
90 # - '' for svfs relative paths
91 _cachedfiles = set()
91 _cachedfiles = set()
92
92
93
93
94 class _basefilecache(scmutil.filecache):
94 class _basefilecache(scmutil.filecache):
95 """All filecache usage on repo are done for logic that should be unfiltered
95 """All filecache usage on repo are done for logic that should be unfiltered
96 """
96 """
97
97
98 def __get__(self, repo, type=None):
98 def __get__(self, repo, type=None):
99 if repo is None:
99 if repo is None:
100 return self
100 return self
101 # proxy to unfiltered __dict__ since filtered repo has no entry
101 # proxy to unfiltered __dict__ since filtered repo has no entry
102 unfi = repo.unfiltered()
102 unfi = repo.unfiltered()
103 try:
103 try:
104 return unfi.__dict__[self.sname]
104 return unfi.__dict__[self.sname]
105 except KeyError:
105 except KeyError:
106 pass
106 pass
107 return super(_basefilecache, self).__get__(unfi, type)
107 return super(_basefilecache, self).__get__(unfi, type)
108
108
109 def set(self, repo, value):
109 def set(self, repo, value):
110 return super(_basefilecache, self).set(repo.unfiltered(), value)
110 return super(_basefilecache, self).set(repo.unfiltered(), value)
111
111
112
112
113 class repofilecache(_basefilecache):
113 class repofilecache(_basefilecache):
114 """filecache for files in .hg but outside of .hg/store"""
114 """filecache for files in .hg but outside of .hg/store"""
115
115
116 def __init__(self, *paths):
116 def __init__(self, *paths):
117 super(repofilecache, self).__init__(*paths)
117 super(repofilecache, self).__init__(*paths)
118 for path in paths:
118 for path in paths:
119 _cachedfiles.add((path, b'plain'))
119 _cachedfiles.add((path, b'plain'))
120
120
121 def join(self, obj, fname):
121 def join(self, obj, fname):
122 return obj.vfs.join(fname)
122 return obj.vfs.join(fname)
123
123
124
124
125 class storecache(_basefilecache):
125 class storecache(_basefilecache):
126 """filecache for files in the store"""
126 """filecache for files in the store"""
127
127
128 def __init__(self, *paths):
128 def __init__(self, *paths):
129 super(storecache, self).__init__(*paths)
129 super(storecache, self).__init__(*paths)
130 for path in paths:
130 for path in paths:
131 _cachedfiles.add((path, b''))
131 _cachedfiles.add((path, b''))
132
132
133 def join(self, obj, fname):
133 def join(self, obj, fname):
134 return obj.sjoin(fname)
134 return obj.sjoin(fname)
135
135
136
136
137 class mixedrepostorecache(_basefilecache):
137 class mixedrepostorecache(_basefilecache):
138 """filecache for a mix files in .hg/store and outside"""
138 """filecache for a mix files in .hg/store and outside"""
139
139
140 def __init__(self, *pathsandlocations):
140 def __init__(self, *pathsandlocations):
141 # scmutil.filecache only uses the path for passing back into our
141 # scmutil.filecache only uses the path for passing back into our
142 # join(), so we can safely pass a list of paths and locations
142 # join(), so we can safely pass a list of paths and locations
143 super(mixedrepostorecache, self).__init__(*pathsandlocations)
143 super(mixedrepostorecache, self).__init__(*pathsandlocations)
144 _cachedfiles.update(pathsandlocations)
144 _cachedfiles.update(pathsandlocations)
145
145
146 def join(self, obj, fnameandlocation):
146 def join(self, obj, fnameandlocation):
147 fname, location = fnameandlocation
147 fname, location = fnameandlocation
148 if location == b'plain':
148 if location == b'plain':
149 return obj.vfs.join(fname)
149 return obj.vfs.join(fname)
150 else:
150 else:
151 if location != b'':
151 if location != b'':
152 raise error.ProgrammingError(
152 raise error.ProgrammingError(
153 b'unexpected location: %s' % location
153 b'unexpected location: %s' % location
154 )
154 )
155 return obj.sjoin(fname)
155 return obj.sjoin(fname)
156
156
157
157
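A hedged sketch of how this decorator is applied; the property name and body are hypothetical, but the (path, location) tuples follow the b'plain'/b'' convention resolved by join() above:

    class somerepo(object):
        @mixedrepostorecache((b'bookmarks', b'plain'), (b'00changelog.i', b''))
        def _bookmarkstate(self):
            # recomputed whenever either backing file changes on disk
            return computebookmarkstate(self)  # hypothetical helper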
158 def isfilecached(repo, name):
158 def isfilecached(repo, name):
159 """check if a repo has already cached "name" filecache-ed property
159 """check if a repo has already cached "name" filecache-ed property
160
160
161 This returns (cachedobj-or-None, iscached) tuple.
161 This returns (cachedobj-or-None, iscached) tuple.
162 """
162 """
163 cacheentry = repo.unfiltered()._filecache.get(name, None)
163 cacheentry = repo.unfiltered()._filecache.get(name, None)
164 if not cacheentry:
164 if not cacheentry:
165 return None, False
165 return None, False
166 return cacheentry.obj, True
166 return cacheentry.obj, True
167
167
168
168
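A short usage sketch: probing for a cached property without forcing it to load. b'changelog' is the usual filecache name on local repositories, assumed here:

    obj, cached = isfilecached(repo, b'changelog')
    if cached:
        cl = obj             # reuse the already-loaded changelog
    else:
        cl = repo.changelog  # triggers the filecache to populate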
169 class unfilteredpropertycache(util.propertycache):
169 class unfilteredpropertycache(util.propertycache):
170 """propertycache that apply to unfiltered repo only"""
170 """propertycache that apply to unfiltered repo only"""
171
171
172 def __get__(self, repo, type=None):
172 def __get__(self, repo, type=None):
173 unfi = repo.unfiltered()
173 unfi = repo.unfiltered()
174 if unfi is repo:
174 if unfi is repo:
175 return super(unfilteredpropertycache, self).__get__(unfi)
175 return super(unfilteredpropertycache, self).__get__(unfi)
176 return getattr(unfi, self.name)
176 return getattr(unfi, self.name)
177
177
178
178
179 class filteredpropertycache(util.propertycache):
179 class filteredpropertycache(util.propertycache):
180 """propertycache that must take filtering in account"""
180 """propertycache that must take filtering in account"""
181
181
182 def cachevalue(self, obj, value):
182 def cachevalue(self, obj, value):
183 object.__setattr__(obj, self.name, value)
183 object.__setattr__(obj, self.name, value)
184
184
185
185
186 def hasunfilteredcache(repo, name):
186 def hasunfilteredcache(repo, name):
187 """check if a repo has an unfilteredpropertycache value for <name>"""
187 """check if a repo has an unfilteredpropertycache value for <name>"""
188 return name in vars(repo.unfiltered())
188 return name in vars(repo.unfiltered())
189
189
190
190
191 def unfilteredmethod(orig):
191 def unfilteredmethod(orig):
192 """decorate method that always need to be run on unfiltered version"""
192 """decorate method that always need to be run on unfiltered version"""
193
193
194 def wrapper(repo, *args, **kwargs):
194 def wrapper(repo, *args, **kwargs):
195 return orig(repo.unfiltered(), *args, **kwargs)
195 return orig(repo.unfiltered(), *args, **kwargs)
196
196
197 return wrapper
197 return wrapper
198
198
199
199
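A hedged sketch of the decorator above; the body is hypothetical, but the effect is that `repo` inside the function is always the unfiltered view:

    @unfilteredmethod
    def rebuildcaches(repo):
        # `repo` here is repo.unfiltered(), even when called through a
        # filtered view such as repo.filtered(b'visible')
        repo._filecache.clear()  # hypothetical cache-reset body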
200 moderncaps = {
200 moderncaps = {
201 b'lookup',
201 b'lookup',
202 b'branchmap',
202 b'branchmap',
203 b'pushkey',
203 b'pushkey',
204 b'known',
204 b'known',
205 b'getbundle',
205 b'getbundle',
206 b'unbundle',
206 b'unbundle',
207 }
207 }
208 legacycaps = moderncaps.union({b'changegroupsubset'})
208 legacycaps = moderncaps.union({b'changegroupsubset'})
209
209
210
210
211 @interfaceutil.implementer(repository.ipeercommandexecutor)
211 @interfaceutil.implementer(repository.ipeercommandexecutor)
212 class localcommandexecutor(object):
212 class localcommandexecutor(object):
213 def __init__(self, peer):
213 def __init__(self, peer):
214 self._peer = peer
214 self._peer = peer
215 self._sent = False
215 self._sent = False
216 self._closed = False
216 self._closed = False
217
217
218 def __enter__(self):
218 def __enter__(self):
219 return self
219 return self
220
220
221 def __exit__(self, exctype, excvalue, exctb):
221 def __exit__(self, exctype, excvalue, exctb):
222 self.close()
222 self.close()
223
223
224 def callcommand(self, command, args):
224 def callcommand(self, command, args):
225 if self._sent:
225 if self._sent:
226 raise error.ProgrammingError(
226 raise error.ProgrammingError(
227 b'callcommand() cannot be used after sendcommands()'
227 b'callcommand() cannot be used after sendcommands()'
228 )
228 )
229
229
230 if self._closed:
230 if self._closed:
231 raise error.ProgrammingError(
231 raise error.ProgrammingError(
232 b'callcommand() cannot be used after close()'
232 b'callcommand() cannot be used after close()'
233 )
233 )
234
234
235 # We don't need to support anything fancy. Just call the named
235 # We don't need to support anything fancy. Just call the named
236 # method on the peer and return a resolved future.
236 # method on the peer and return a resolved future.
237 fn = getattr(self._peer, pycompat.sysstr(command))
237 fn = getattr(self._peer, pycompat.sysstr(command))
238
238
239 f = pycompat.futures.Future()
239 f = pycompat.futures.Future()
240
240
241 try:
241 try:
242 result = fn(**pycompat.strkwargs(args))
242 result = fn(**pycompat.strkwargs(args))
243 except Exception:
243 except Exception:
244 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
244 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
245 else:
245 else:
246 f.set_result(result)
246 f.set_result(result)
247
247
248 return f
248 return f
249
249
250 def sendcommands(self):
250 def sendcommands(self):
251 self._sent = True
251 self._sent = True
252
252
253 def close(self):
253 def close(self):
254 self._closed = True
254 self._closed = True
255
255
256
256
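A usage sketch for the executor above, following the standard peer-command pattern; `peer` is assumed to come from repo.peer(), and b'heads' is a stock wire command:

    with peer.commandexecutor() as e:
        fheads = e.callcommand(b'heads', {})
        e.sendcommands()         # a no-op flag locally; flushes on wire peers
        heads = fheads.result()  # future resolved with the command result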
257 @interfaceutil.implementer(repository.ipeercommands)
257 @interfaceutil.implementer(repository.ipeercommands)
258 class localpeer(repository.peer):
258 class localpeer(repository.peer):
259 '''peer for a local repo; reflects only the most recent API'''
259 '''peer for a local repo; reflects only the most recent API'''
260
260
261 def __init__(self, repo, caps=None):
261 def __init__(self, repo, caps=None):
262 super(localpeer, self).__init__()
262 super(localpeer, self).__init__()
263
263
264 if caps is None:
264 if caps is None:
265 caps = moderncaps.copy()
265 caps = moderncaps.copy()
266 self._repo = repo.filtered(b'served')
266 self._repo = repo.filtered(b'served')
267 self.ui = repo.ui
267 self.ui = repo.ui
268 self._caps = repo._restrictcapabilities(caps)
268 self._caps = repo._restrictcapabilities(caps)
269
269
270 # Begin of _basepeer interface.
270 # Begin of _basepeer interface.
271
271
272 def url(self):
272 def url(self):
273 return self._repo.url()
273 return self._repo.url()
274
274
275 def local(self):
275 def local(self):
276 return self._repo
276 return self._repo
277
277
278 def peer(self):
278 def peer(self):
279 return self
279 return self
280
280
281 def canpush(self):
281 def canpush(self):
282 return True
282 return True
283
283
284 def close(self):
284 def close(self):
285 self._repo.close()
285 self._repo.close()
286
286
287 # End of _basepeer interface.
287 # End of _basepeer interface.
288
288
289 # Begin of _basewirecommands interface.
289 # Begin of _basewirecommands interface.
290
290
291 def branchmap(self):
291 def branchmap(self):
292 return self._repo.branchmap()
292 return self._repo.branchmap()
293
293
294 def capabilities(self):
294 def capabilities(self):
295 return self._caps
295 return self._caps
296
296
297 def clonebundles(self):
297 def clonebundles(self):
298 return self._repo.tryread(b'clonebundles.manifest')
298 return self._repo.tryread(b'clonebundles.manifest')
299
299
300 def debugwireargs(self, one, two, three=None, four=None, five=None):
300 def debugwireargs(self, one, two, three=None, four=None, five=None):
301 """Used to test argument passing over the wire"""
301 """Used to test argument passing over the wire"""
302 return b"%s %s %s %s %s" % (
302 return b"%s %s %s %s %s" % (
303 one,
303 one,
304 two,
304 two,
305 pycompat.bytestr(three),
305 pycompat.bytestr(three),
306 pycompat.bytestr(four),
306 pycompat.bytestr(four),
307 pycompat.bytestr(five),
307 pycompat.bytestr(five),
308 )
308 )
309
309
310 def getbundle(
310 def getbundle(
311 self, source, heads=None, common=None, bundlecaps=None, **kwargs
311 self, source, heads=None, common=None, bundlecaps=None, **kwargs
312 ):
312 ):
313 chunks = exchange.getbundlechunks(
313 chunks = exchange.getbundlechunks(
314 self._repo,
314 self._repo,
315 source,
315 source,
316 heads=heads,
316 heads=heads,
317 common=common,
317 common=common,
318 bundlecaps=bundlecaps,
318 bundlecaps=bundlecaps,
319 **kwargs
319 **kwargs
320 )[1]
320 )[1]
321 cb = util.chunkbuffer(chunks)
321 cb = util.chunkbuffer(chunks)
322
322
323 if exchange.bundle2requested(bundlecaps):
323 if exchange.bundle2requested(bundlecaps):
324 # When requesting a bundle2, getbundle returns a stream to make the
324 # When requesting a bundle2, getbundle returns a stream to make the
325 # wire level function happier. We need to build a proper object
325 # wire level function happier. We need to build a proper object
326 # from it in local peer.
326 # from it in local peer.
327 return bundle2.getunbundler(self.ui, cb)
327 return bundle2.getunbundler(self.ui, cb)
328 else:
328 else:
329 return changegroup.getunbundler(b'01', cb, None)
329 return changegroup.getunbundler(b'01', cb, None)
330
330
331 def heads(self):
331 def heads(self):
332 return self._repo.heads()
332 return self._repo.heads()
333
333
334 def known(self, nodes):
334 def known(self, nodes):
335 return self._repo.known(nodes)
335 return self._repo.known(nodes)
336
336
337 def listkeys(self, namespace):
337 def listkeys(self, namespace):
338 return self._repo.listkeys(namespace)
338 return self._repo.listkeys(namespace)
339
339
340 def lookup(self, key):
340 def lookup(self, key):
341 return self._repo.lookup(key)
341 return self._repo.lookup(key)
342
342
343 def pushkey(self, namespace, key, old, new):
343 def pushkey(self, namespace, key, old, new):
344 return self._repo.pushkey(namespace, key, old, new)
344 return self._repo.pushkey(namespace, key, old, new)
345
345
346 def stream_out(self):
346 def stream_out(self):
347 raise error.Abort(_(b'cannot perform stream clone against local peer'))
347 raise error.Abort(_(b'cannot perform stream clone against local peer'))
348
348
349 def unbundle(self, bundle, heads, url):
349 def unbundle(self, bundle, heads, url):
350 """apply a bundle on a repo
350 """apply a bundle on a repo
351
351
352 This function handles the repo locking itself."""
352 This function handles the repo locking itself."""
353 try:
353 try:
354 try:
354 try:
355 bundle = exchange.readbundle(self.ui, bundle, None)
355 bundle = exchange.readbundle(self.ui, bundle, None)
356 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
356 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
357 if util.safehasattr(ret, b'getchunks'):
357 if util.safehasattr(ret, b'getchunks'):
358 # This is a bundle20 object, turn it into an unbundler.
358 # This is a bundle20 object, turn it into an unbundler.
359 # This little dance should be dropped eventually when the
359 # This little dance should be dropped eventually when the
360 # API is finally improved.
360 # API is finally improved.
361 stream = util.chunkbuffer(ret.getchunks())
361 stream = util.chunkbuffer(ret.getchunks())
362 ret = bundle2.getunbundler(self.ui, stream)
362 ret = bundle2.getunbundler(self.ui, stream)
363 return ret
363 return ret
364 except Exception as exc:
364 except Exception as exc:
365 # If the exception contains output salvaged from a bundle2
365 # If the exception contains output salvaged from a bundle2
366 # reply, we need to make sure it is printed before continuing
366 # reply, we need to make sure it is printed before continuing
367 # to fail. So we build a bundle2 with such output and consume
367 # to fail. So we build a bundle2 with such output and consume
368 # it directly.
368 # it directly.
369 #
369 #
370 # This is not very elegant but allows a "simple" solution for
370 # This is not very elegant but allows a "simple" solution for
371 # issue4594
371 # issue4594
372 output = getattr(exc, '_bundle2salvagedoutput', ())
372 output = getattr(exc, '_bundle2salvagedoutput', ())
373 if output:
373 if output:
374 bundler = bundle2.bundle20(self._repo.ui)
374 bundler = bundle2.bundle20(self._repo.ui)
375 for out in output:
375 for out in output:
376 bundler.addpart(out)
376 bundler.addpart(out)
377 stream = util.chunkbuffer(bundler.getchunks())
377 stream = util.chunkbuffer(bundler.getchunks())
378 b = bundle2.getunbundler(self.ui, stream)
378 b = bundle2.getunbundler(self.ui, stream)
379 bundle2.processbundle(self._repo, b)
379 bundle2.processbundle(self._repo, b)
380 raise
380 raise
381 except error.PushRaced as exc:
381 except error.PushRaced as exc:
382 raise error.ResponseError(
382 raise error.ResponseError(
383 _(b'push failed:'), stringutil.forcebytestr(exc)
383 _(b'push failed:'), stringutil.forcebytestr(exc)
384 )
384 )
385
385
386 # End of _basewirecommands interface.
386 # End of _basewirecommands interface.
387
387
388 # Begin of peer interface.
388 # Begin of peer interface.
389
389
390 def commandexecutor(self):
390 def commandexecutor(self):
391 return localcommandexecutor(self)
391 return localcommandexecutor(self)
392
392
393 # End of peer interface.
393 # End of peer interface.
394
394
395
395
396 @interfaceutil.implementer(repository.ipeerlegacycommands)
396 @interfaceutil.implementer(repository.ipeerlegacycommands)
397 class locallegacypeer(localpeer):
397 class locallegacypeer(localpeer):
398 '''peer extension which implements legacy methods too; used for tests with
398 '''peer extension which implements legacy methods too; used for tests with
399 restricted capabilities'''
399 restricted capabilities'''
400
400
401 def __init__(self, repo):
401 def __init__(self, repo):
402 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
402 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
403
403
404 # Begin of baselegacywirecommands interface.
404 # Begin of baselegacywirecommands interface.
405
405
406 def between(self, pairs):
406 def between(self, pairs):
407 return self._repo.between(pairs)
407 return self._repo.between(pairs)
408
408
409 def branches(self, nodes):
409 def branches(self, nodes):
410 return self._repo.branches(nodes)
410 return self._repo.branches(nodes)
411
411
412 def changegroup(self, nodes, source):
412 def changegroup(self, nodes, source):
413 outgoing = discovery.outgoing(
413 outgoing = discovery.outgoing(
414 self._repo, missingroots=nodes, missingheads=self._repo.heads()
414 self._repo, missingroots=nodes, missingheads=self._repo.heads()
415 )
415 )
416 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
416 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
417
417
418 def changegroupsubset(self, bases, heads, source):
418 def changegroupsubset(self, bases, heads, source):
419 outgoing = discovery.outgoing(
419 outgoing = discovery.outgoing(
420 self._repo, missingroots=bases, missingheads=heads
420 self._repo, missingroots=bases, missingheads=heads
421 )
421 )
422 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
422 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
423
423
424 # End of baselegacywirecommands interface.
424 # End of baselegacywirecommands interface.
425
425
426
426
427 # Increment the sub-version when the revlog v2 format changes to lock out old
427 # Increment the sub-version when the revlog v2 format changes to lock out old
428 # clients.
428 # clients.
429 REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
429 REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
430
430
431 # A repository with the sparserevlog feature will have delta chains that
431 # A repository with the sparserevlog feature will have delta chains that
432 # can spread over a larger span. Sparse reading cuts these large spans into
432 # can spread over a larger span. Sparse reading cuts these large spans into
433 # pieces, so that each piece isn't too big.
433 # pieces, so that each piece isn't too big.
434 # Without the sparserevlog capability, reading from the repository could use
434 # Without the sparserevlog capability, reading from the repository could use
435 # huge amounts of memory, because the whole span would be read at once,
435 # huge amounts of memory, because the whole span would be read at once,
436 # including all the intermediate revisions that aren't pertinent for the chain.
436 # including all the intermediate revisions that aren't pertinent for the chain.
437 # This is why once a repository has enabled sparse-read, it becomes required.
437 # This is why once a repository has enabled sparse-read, it becomes required.
438 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
438 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
439
439
440 # A repository with the sidedataflag requirement allows storing extra
440 # A repository with the sidedataflag requirement allows storing extra
441 # information for revisions without altering their original hashes.
441 # information for revisions without altering their original hashes.
442 SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
442 SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
443
443
444 # A repository with the copies-sidedata-changeset requirement will store
444 # A repository with the copies-sidedata-changeset requirement will store
445 # copies related information in changeset's sidedata.
445 # copies related information in changeset's sidedata.
446 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
446 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
447
447
448 # Functions receiving (ui, features) that extensions can register to impact
448 # Functions receiving (ui, features) that extensions can register to impact
449 # the ability to load repositories with custom requirements. Only
449 # the ability to load repositories with custom requirements. Only
450 # functions defined in loaded extensions are called.
450 # functions defined in loaded extensions are called.
451 #
451 #
452 # The function receives a set of requirement strings that the repository
452 # The function receives a set of requirement strings that the repository
453 # is capable of opening. Functions will typically add elements to the
453 # is capable of opening. Functions will typically add elements to the
454 # set to reflect that the extension knows how to handle those requirements.
454 # set to reflect that the extension knows how to handle those requirements.
455 featuresetupfuncs = set()
455 featuresetupfuncs = set()
456
456
457
457
458 def makelocalrepository(baseui, path, intents=None):
458 def makelocalrepository(baseui, path, intents=None):
459 """Create a local repository object.
459 """Create a local repository object.
460
460
461 Given arguments needed to construct a local repository, this function
461 Given arguments needed to construct a local repository, this function
462 performs various early repository loading functionality (such as
462 performs various early repository loading functionality (such as
463 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
463 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
464 the repository can be opened, derives a type suitable for representing
464 the repository can be opened, derives a type suitable for representing
465 that repository, and returns an instance of it.
465 that repository, and returns an instance of it.
466
466
467 The returned object conforms to the ``repository.completelocalrepository``
467 The returned object conforms to the ``repository.completelocalrepository``
468 interface.
468 interface.
469
469
470 The repository type is derived by calling a series of factory functions
470 The repository type is derived by calling a series of factory functions
471 for each aspect/interface of the final repository. These are defined by
471 for each aspect/interface of the final repository. These are defined by
472 ``REPO_INTERFACES``.
472 ``REPO_INTERFACES``.
473
473
474 Each factory function is called to produce a type implementing a specific
474 Each factory function is called to produce a type implementing a specific
475 interface. The cumulative list of returned types will be combined into a
475 interface. The cumulative list of returned types will be combined into a
476 new type and that type will be instantiated to represent the local
476 new type and that type will be instantiated to represent the local
477 repository.
477 repository.
478
478
479 The factory functions each receive various state that may be consulted
479 The factory functions each receive various state that may be consulted
480 as part of deriving a type.
480 as part of deriving a type.
481
481
482 Extensions should wrap these factory functions to customize repository type
482 Extensions should wrap these factory functions to customize repository type
483 creation. Note that an extension's wrapped function may be called even if
483 creation. Note that an extension's wrapped function may be called even if
484 that extension is not loaded for the repo being constructed. Extensions
484 that extension is not loaded for the repo being constructed. Extensions
485 should check if their ``__name__`` appears in the
485 should check if their ``__name__`` appears in the
486 ``extensionmodulenames`` set passed to the factory function and no-op if
486 ``extensionmodulenames`` set passed to the factory function and no-op if
487 not.
487 not.
488 """
488 """
489 ui = baseui.copy()
489 ui = baseui.copy()
490 # Prevent copying repo configuration.
490 # Prevent copying repo configuration.
491 ui.copy = baseui.copy
491 ui.copy = baseui.copy
492
492
493 # Working directory VFS rooted at repository root.
493 # Working directory VFS rooted at repository root.
494 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
494 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
495
495
496 # Main VFS for .hg/ directory.
496 # Main VFS for .hg/ directory.
497 hgpath = wdirvfs.join(b'.hg')
497 hgpath = wdirvfs.join(b'.hg')
498 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
498 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
499
499
500 # The .hg/ path should exist and should be a directory. All other
500 # The .hg/ path should exist and should be a directory. All other
501 # cases are errors.
501 # cases are errors.
502 if not hgvfs.isdir():
502 if not hgvfs.isdir():
503 try:
503 try:
504 hgvfs.stat()
504 hgvfs.stat()
505 except OSError as e:
505 except OSError as e:
506 if e.errno != errno.ENOENT:
506 if e.errno != errno.ENOENT:
507 raise
507 raise
508
508
509 raise error.RepoError(_(b'repository %s not found') % path)
509 raise error.RepoError(_(b'repository %s not found') % path)
510
510
511 # .hg/requires file contains a newline-delimited list of
511 # .hg/requires file contains a newline-delimited list of
512 # features/capabilities the opener (us) must have in order to use
512 # features/capabilities the opener (us) must have in order to use
513 # the repository. This file was introduced in Mercurial 0.9.2,
513 # the repository. This file was introduced in Mercurial 0.9.2,
514 # which means very old repositories may not have one. We assume
514 # which means very old repositories may not have one. We assume
515 # a missing file translates to no requirements.
515 # a missing file translates to no requirements.
516 try:
516 try:
517 requirements = set(hgvfs.read(b'requires').splitlines())
517 requirements = set(hgvfs.read(b'requires').splitlines())
518 except IOError as e:
518 except IOError as e:
519 if e.errno != errno.ENOENT:
519 if e.errno != errno.ENOENT:
520 raise
520 raise
521 requirements = set()
521 requirements = set()
522
522
523 # The .hg/hgrc file may load extensions or contain config options
523 # The .hg/hgrc file may load extensions or contain config options
524 # that influence repository construction. Attempt to load it and
524 # that influence repository construction. Attempt to load it and
525 # process any new extensions that it may have pulled in.
525 # process any new extensions that it may have pulled in.
526 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
526 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
527 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
527 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
528 extensions.loadall(ui)
528 extensions.loadall(ui)
529 extensions.populateui(ui)
529 extensions.populateui(ui)
530
530
531 # Set of module names of extensions loaded for this repository.
531 # Set of module names of extensions loaded for this repository.
532 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
532 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
533
533
534 supportedrequirements = gathersupportedrequirements(ui)
534 supportedrequirements = gathersupportedrequirements(ui)
535
535
536 # We first validate the requirements are known.
536 # We first validate the requirements are known.
537 ensurerequirementsrecognized(requirements, supportedrequirements)
537 ensurerequirementsrecognized(requirements, supportedrequirements)
538
538
539 # Then we validate that the known set is reasonable to use together.
539 # Then we validate that the known set is reasonable to use together.
540 ensurerequirementscompatible(ui, requirements)
540 ensurerequirementscompatible(ui, requirements)
541
541
542 # TODO there are unhandled edge cases related to opening repositories with
542 # TODO there are unhandled edge cases related to opening repositories with
543 # shared storage. If storage is shared, we should also test for requirements
543 # shared storage. If storage is shared, we should also test for requirements
544 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
544 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
545 # that repo, as that repo may load extensions needed to open it. This is a
545 # that repo, as that repo may load extensions needed to open it. This is a
546 # bit complicated because we don't want the other hgrc to overwrite settings
546 # bit complicated because we don't want the other hgrc to overwrite settings
547 # in this hgrc.
547 # in this hgrc.
548 #
548 #
549 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
549 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
550 # file when sharing repos. But if a requirement is added after the share is
550 # file when sharing repos. But if a requirement is added after the share is
551 # performed, thereby introducing a new requirement for the opener, we may
551 # performed, thereby introducing a new requirement for the opener, we may
552 # not see that and could encounter a run-time error interacting with
552 # not see that and could encounter a run-time error interacting with
553 # that shared store since it has an unknown-to-us requirement.
553 # that shared store since it has an unknown-to-us requirement.
554
554
555 # At this point, we know we should be capable of opening the repository.
555 # At this point, we know we should be capable of opening the repository.
556 # Now get on with doing that.
556 # Now get on with doing that.
557
557
558 features = set()
558 features = set()
559
559
560 # The "store" part of the repository holds versioned data. How it is
560 # The "store" part of the repository holds versioned data. How it is
561 # accessed is determined by various requirements. The ``shared`` or
561 # accessed is determined by various requirements. The ``shared`` or
562 # ``relshared`` requirements indicate the store lives in the path contained
562 # ``relshared`` requirements indicate the store lives in the path contained
563 # in the ``.hg/sharedpath`` file. This is an absolute path for
563 # in the ``.hg/sharedpath`` file. This is an absolute path for
564 # ``shared`` and relative to ``.hg/`` for ``relshared``.
564 # ``shared`` and relative to ``.hg/`` for ``relshared``.
565 if b'shared' in requirements or b'relshared' in requirements:
565 if b'shared' in requirements or b'relshared' in requirements:
566 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
566 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
567 if b'relshared' in requirements:
567 if b'relshared' in requirements:
568 sharedpath = hgvfs.join(sharedpath)
568 sharedpath = hgvfs.join(sharedpath)
569
569
570 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
570 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
571
571
572 if not sharedvfs.exists():
572 if not sharedvfs.exists():
573 raise error.RepoError(
573 raise error.RepoError(
574 _(b'.hg/sharedpath points to nonexistent directory %s')
574 _(b'.hg/sharedpath points to nonexistent directory %s')
575 % sharedvfs.base
575 % sharedvfs.base
576 )
576 )
577
577
578 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
578 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
579
579
580 storebasepath = sharedvfs.base
580 storebasepath = sharedvfs.base
581 cachepath = sharedvfs.join(b'cache')
581 cachepath = sharedvfs.join(b'cache')
582 else:
582 else:
583 storebasepath = hgvfs.base
583 storebasepath = hgvfs.base
584 cachepath = hgvfs.join(b'cache')
584 cachepath = hgvfs.join(b'cache')
585 wcachepath = hgvfs.join(b'wcache')
585 wcachepath = hgvfs.join(b'wcache')
586
586
587 # The store has changed over time and the exact layout is dictated by
587 # The store has changed over time and the exact layout is dictated by
588 # requirements. The store interface abstracts differences across all
588 # requirements. The store interface abstracts differences across all
589 # of them.
589 # of them.
590 store = makestore(
590 store = makestore(
591 requirements,
591 requirements,
592 storebasepath,
592 storebasepath,
593 lambda base: vfsmod.vfs(base, cacheaudited=True),
593 lambda base: vfsmod.vfs(base, cacheaudited=True),
594 )
594 )
595 hgvfs.createmode = store.createmode
595 hgvfs.createmode = store.createmode
596
596
597 storevfs = store.vfs
597 storevfs = store.vfs
598 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
598 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
599
599
600 # The cache vfs is used to manage cache files.
600 # The cache vfs is used to manage cache files.
601 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
601 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
602 cachevfs.createmode = store.createmode
602 cachevfs.createmode = store.createmode
603 # The cache vfs is used to manage cache files related to the working copy
603 # The cache vfs is used to manage cache files related to the working copy
604 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
604 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
605 wcachevfs.createmode = store.createmode
605 wcachevfs.createmode = store.createmode
606
606
607 # Now resolve the type for the repository object. We do this by repeatedly
607 # Now resolve the type for the repository object. We do this by repeatedly
608 # calling a factory function to produce types for specific aspects of the
608 # calling a factory function to produce types for specific aspects of the
609 # repo's operation. The aggregate returned types are used as base classes
609 # repo's operation. The aggregate returned types are used as base classes
610 # for a dynamically-derived type, which will represent our new repository.
610 # for a dynamically-derived type, which will represent our new repository.
611
611
612 bases = []
612 bases = []
613 extrastate = {}
613 extrastate = {}
614
614
615 for iface, fn in REPO_INTERFACES:
615 for iface, fn in REPO_INTERFACES:
616 # We pass all potentially useful state to give extensions tons of
616 # We pass all potentially useful state to give extensions tons of
617 # flexibility.
617 # flexibility.
618 typ = fn()(
618 typ = fn()(
619 ui=ui,
619 ui=ui,
620 intents=intents,
620 intents=intents,
621 requirements=requirements,
621 requirements=requirements,
622 features=features,
622 features=features,
623 wdirvfs=wdirvfs,
623 wdirvfs=wdirvfs,
624 hgvfs=hgvfs,
624 hgvfs=hgvfs,
625 store=store,
625 store=store,
626 storevfs=storevfs,
626 storevfs=storevfs,
627 storeoptions=storevfs.options,
627 storeoptions=storevfs.options,
628 cachevfs=cachevfs,
628 cachevfs=cachevfs,
629 wcachevfs=wcachevfs,
629 wcachevfs=wcachevfs,
630 extensionmodulenames=extensionmodulenames,
630 extensionmodulenames=extensionmodulenames,
631 extrastate=extrastate,
631 extrastate=extrastate,
632 baseclasses=bases,
632 baseclasses=bases,
633 )
633 )
634
634
635 if not isinstance(typ, type):
635 if not isinstance(typ, type):
636 raise error.ProgrammingError(
636 raise error.ProgrammingError(
637 b'unable to construct type for %s' % iface
637 b'unable to construct type for %s' % iface
638 )
638 )
639
639
640 bases.append(typ)
640 bases.append(typ)
641
641
642 # type() allows you to use characters in type names that wouldn't be
642 # type() allows you to use characters in type names that wouldn't be
643 # recognized as Python symbols in source code. We abuse that to add
643 # recognized as Python symbols in source code. We abuse that to add
644 # rich information about our constructed repo.
644 # rich information about our constructed repo.
645 name = pycompat.sysstr(
645 name = pycompat.sysstr(
646 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
646 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
647 )
647 )
648
648
649 cls = type(name, tuple(bases), {})
649 cls = type(name, tuple(bases), {})
650
650
651 return cls(
651 return cls(
652 baseui=baseui,
652 baseui=baseui,
653 ui=ui,
653 ui=ui,
654 origroot=path,
654 origroot=path,
655 wdirvfs=wdirvfs,
655 wdirvfs=wdirvfs,
656 hgvfs=hgvfs,
656 hgvfs=hgvfs,
657 requirements=requirements,
657 requirements=requirements,
658 supportedrequirements=supportedrequirements,
658 supportedrequirements=supportedrequirements,
659 sharedpath=storebasepath,
659 sharedpath=storebasepath,
660 store=store,
660 store=store,
661 cachevfs=cachevfs,
661 cachevfs=cachevfs,
662 wcachevfs=wcachevfs,
662 wcachevfs=wcachevfs,
663 features=features,
663 features=features,
664 intents=intents,
664 intents=intents,
665 )
665 )
666
666
667
667
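The dynamic composition used above can be shown standalone; the class and requirement names here are invented for illustration:

    class revlogstorage(object):
        def storagekind(self):
            return b'revlog'

    class workingcopyfeatures(object):
        def hasworkingcopy(self):
            return True

    name = 'derivedrepo:/some/path<generaldelta,store>'  # not a valid identifier
    cls = type(name, (revlogstorage, workingcopyfeatures), {})
    repo_like = cls()
    assert repo_like.storagekind() == b'revlog'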
668 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
668 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
669 """Load hgrc files/content into a ui instance.
669 """Load hgrc files/content into a ui instance.
670
670
671 This is called during repository opening to load any additional
671 This is called during repository opening to load any additional
672 config files or settings relevant to the current repository.
672 config files or settings relevant to the current repository.
673
673
674 Returns a bool indicating whether any additional configs were loaded.
674 Returns a bool indicating whether any additional configs were loaded.
675
675
676 Extensions should monkeypatch this function to modify how per-repo
676 Extensions should monkeypatch this function to modify how per-repo
677 configs are loaded. For example, an extension may wish to pull in
677 configs are loaded. For example, an extension may wish to pull in
678 configs from alternate files or sources.
678 configs from alternate files or sources.
679 """
679 """
680 if not rcutil.use_repo_hgrc():
680 if not rcutil.use_repo_hgrc():
681 return False
681 return False
682 try:
682 try:
683 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
683 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
684 return True
684 return True
685 except IOError:
685 except IOError:
686 return False
686 return False
687
687
688
688
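A hedged sketch of the monkeypatching the docstring invites; extensions.wrapfunction is the standard hook point, while the b'hgrc-extra' file name is purely hypothetical:

    from mercurial import extensions, localrepo

    def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
        loaded = orig(ui, wdirvfs, hgvfs, requirements)
        try:
            # pull in an additional, hypothetical per-repo config file
            ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
            return True
        except IOError:
            return loaded

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)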
689 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
689 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
690 """Perform additional actions after .hg/hgrc is loaded.
690 """Perform additional actions after .hg/hgrc is loaded.
691
691
692 This function is called during repository loading immediately after
692 This function is called during repository loading immediately after
693 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
693 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
694
694
695 The function can be used to validate configs, automatically add
695 The function can be used to validate configs, automatically add
696 options (including extensions) based on requirements, etc.
696 options (including extensions) based on requirements, etc.
697 """
697 """
698
698
699 # Map of requirements to list of extensions to load automatically when
699 # Map of requirements to list of extensions to load automatically when
700 # requirement is present.
700 # requirement is present.
701 autoextensions = {
701 autoextensions = {
702 b'git': [b'git'],
702 b'git': [b'git'],
703 b'largefiles': [b'largefiles'],
703 b'largefiles': [b'largefiles'],
704 b'lfs': [b'lfs'],
704 b'lfs': [b'lfs'],
705 }
705 }
706
706
707 for requirement, names in sorted(autoextensions.items()):
707 for requirement, names in sorted(autoextensions.items()):
708 if requirement not in requirements:
708 if requirement not in requirements:
709 continue
709 continue
710
710
711 for name in names:
711 for name in names:
712 if not ui.hasconfig(b'extensions', name):
712 if not ui.hasconfig(b'extensions', name):
713 ui.setconfig(b'extensions', name, b'', source=b'autoload')
713 ui.setconfig(b'extensions', name, b'', source=b'autoload')
714
714
715
715
716 def gathersupportedrequirements(ui):
716 def gathersupportedrequirements(ui):
717 """Determine the complete set of recognized requirements."""
717 """Determine the complete set of recognized requirements."""
718 # Start with all requirements supported by this file.
718 # Start with all requirements supported by this file.
719 supported = set(localrepository._basesupported)
719 supported = set(localrepository._basesupported)
720
720
721 # Execute ``featuresetupfuncs`` entries if they belong to an extension
721 # Execute ``featuresetupfuncs`` entries if they belong to an extension
722 # relevant to this ui instance.
722 # relevant to this ui instance.
723 modules = {m.__name__ for n, m in extensions.extensions(ui)}
723 modules = {m.__name__ for n, m in extensions.extensions(ui)}
724
724
725 for fn in featuresetupfuncs:
725 for fn in featuresetupfuncs:
726 if fn.__module__ in modules:
726 if fn.__module__ in modules:
727 fn(ui, supported)
727 fn(ui, supported)
728
728
729 # Add derived requirements from registered compression engines.
729 # Add derived requirements from registered compression engines.
730 for name in util.compengines:
730 for name in util.compengines:
731 engine = util.compengines[name]
731 engine = util.compengines[name]
732 if engine.available() and engine.revlogheader():
732 if engine.available() and engine.revlogheader():
733 supported.add(b'exp-compression-%s' % name)
733 supported.add(b'exp-compression-%s' % name)
734 if engine.name() == b'zstd':
734 if engine.name() == b'zstd':
735 supported.add(b'revlog-compression-zstd')
735 supported.add(b'revlog-compression-zstd')
736
736
737 return supported
737 return supported
738
738
739
739
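The registration side of featuresetupfuncs, sketched the way an extension would do it; b'exp-myfeature' is a made-up requirement string:

    from mercurial import localrepo

    def featuresetup(ui, supported):
        supported.add(b'exp-myfeature')

    def uisetup(ui):
        localrepo.featuresetupfuncs.add(featuresetup)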
740 def ensurerequirementsrecognized(requirements, supported):
740 def ensurerequirementsrecognized(requirements, supported):
741 """Validate that a set of local requirements is recognized.
741 """Validate that a set of local requirements is recognized.
742
742
743 Receives a set of requirements. Raises an ``error.RepoError`` if there
743 Receives a set of requirements. Raises an ``error.RepoError`` if there
744 exists any requirement in that set that currently loaded code doesn't
744 exists any requirement in that set that currently loaded code doesn't
745 recognize.
745 recognize.
746
746
747 Returns a set of supported requirements.
747 Returns a set of supported requirements.
748 """
748 """
749 missing = set()
749 missing = set()
750
750
751 for requirement in requirements:
751 for requirement in requirements:
752 if requirement in supported:
752 if requirement in supported:
753 continue
753 continue
754
754
755 if not requirement or not requirement[0:1].isalnum():
755 if not requirement or not requirement[0:1].isalnum():
756 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
756 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
757
757
758 missing.add(requirement)
758 missing.add(requirement)
759
759
760 if missing:
760 if missing:
761 raise error.RequirementError(
761 raise error.RequirementError(
762 _(b'repository requires features unknown to this Mercurial: %s')
762 _(b'repository requires features unknown to this Mercurial: %s')
763 % b' '.join(sorted(missing)),
763 % b' '.join(sorted(missing)),
764 hint=_(
764 hint=_(
765 b'see https://mercurial-scm.org/wiki/MissingRequirement '
765 b'see https://mercurial-scm.org/wiki/MissingRequirement '
766 b'for more information'
766 b'for more information'
767 ),
767 ),
768 )
768 )
769
769
770
770
771 def ensurerequirementscompatible(ui, requirements):
771 def ensurerequirementscompatible(ui, requirements):
772 """Validates that a set of recognized requirements is mutually compatible.
772 """Validates that a set of recognized requirements is mutually compatible.
773
773
774 Some requirements may not be compatible with others or require
774 Some requirements may not be compatible with others or require
775 config options that aren't enabled. This function is called during
775 config options that aren't enabled. This function is called during
776 repository opening to ensure that the set of requirements needed
776 repository opening to ensure that the set of requirements needed
777 to open a repository is sane and compatible with config options.
777 to open a repository is sane and compatible with config options.
778
778
779 Extensions can monkeypatch this function to perform additional
779 Extensions can monkeypatch this function to perform additional
780 checking.
780 checking.
781
781
782 ``error.RepoError`` should be raised on failure.
782 ``error.RepoError`` should be raised on failure.
783 """
783 """
784 if b'exp-sparse' in requirements and not sparse.enabled:
784 if b'exp-sparse' in requirements and not sparse.enabled:
785 raise error.RepoError(
785 raise error.RepoError(
786 _(
786 _(
787 b'repository is using sparse feature but '
787 b'repository is using sparse feature but '
788 b'sparse is not enabled; enable the '
788 b'sparse is not enabled; enable the '
789 b'"sparse" extensions to access'
789 b'"sparse" extensions to access'
790 )
790 )
791 )
791 )
792
792
793
793
794 def makestore(requirements, path, vfstype):
794 def makestore(requirements, path, vfstype):
795 """Construct a storage object for a repository."""
795 """Construct a storage object for a repository."""
796 if b'store' in requirements:
796 if b'store' in requirements:
797 if b'fncache' in requirements:
797 if b'fncache' in requirements:
798 return storemod.fncachestore(
798 return storemod.fncachestore(
799 path, vfstype, b'dotencode' in requirements
799 path, vfstype, b'dotencode' in requirements
800 )
800 )
801
801
802 return storemod.encodedstore(path, vfstype)
802 return storemod.encodedstore(path, vfstype)
803
803
804 return storemod.basicstore(path, vfstype)
804 return storemod.basicstore(path, vfstype)
805
805
806
806
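The dispatch above reduces to a small requirement-to-class table; a standalone restatement for illustration only:

    def whichstore(requirements):
        # mirrors makestore()'s branching without constructing anything
        if b'store' in requirements:
            if b'fncache' in requirements:
                return b'fncachestore'   # honors b'dotencode' too
            return b'encodedstore'
        return b'basicstore'             # very old repos without b'store'

    assert whichstore({b'store', b'fncache', b'dotencode'}) == b'fncachestore'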
807 def resolvestorevfsoptions(ui, requirements, features):
807 def resolvestorevfsoptions(ui, requirements, features):
808 """Resolve the options to pass to the store vfs opener.
808 """Resolve the options to pass to the store vfs opener.
809
809
810 The returned dict is used to influence behavior of the storage layer.
810 The returned dict is used to influence behavior of the storage layer.
811 """
811 """
812 options = {}
812 options = {}
813
813
814 if b'treemanifest' in requirements:
814 if b'treemanifest' in requirements:
815 options[b'treemanifest'] = True
815 options[b'treemanifest'] = True
816
816
817 # experimental config: format.manifestcachesize
817 # experimental config: format.manifestcachesize
818 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
818 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
819 if manifestcachesize is not None:
819 if manifestcachesize is not None:
820 options[b'manifestcachesize'] = manifestcachesize
820 options[b'manifestcachesize'] = manifestcachesize
821
821
822 # In the absence of another requirement superseding a revlog-related
822 # In the absence of another requirement superseding a revlog-related
823 # requirement, we have to assume the repo is using revlog version 0.
823 # requirement, we have to assume the repo is using revlog version 0.
824 # This revlog format is super old and we don't bother trying to parse
824 # This revlog format is super old and we don't bother trying to parse
825 # opener options for it because those options wouldn't do anything
825 # opener options for it because those options wouldn't do anything
826 # meaningful on such old repos.
826 # meaningful on such old repos.
827 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
827 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
828 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
828 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
829 else: # explicitly mark repo as using revlogv0
829 else: # explicitly mark repo as using revlogv0
830 options[b'revlogv0'] = True
830 options[b'revlogv0'] = True
831
831
832 if COPIESSDC_REQUIREMENT in requirements:
832 if COPIESSDC_REQUIREMENT in requirements:
833 options[b'copies-storage'] = b'changeset-sidedata'
833 options[b'copies-storage'] = b'changeset-sidedata'
834 else:
834 else:
835 writecopiesto = ui.config(b'experimental', b'copies.write-to')
835 writecopiesto = ui.config(b'experimental', b'copies.write-to')
836 copiesextramode = (b'changeset-only', b'compatibility')
836 copiesextramode = (b'changeset-only', b'compatibility')
837 if writecopiesto in copiesextramode:
837 if writecopiesto in copiesextramode:
838 options[b'copies-storage'] = b'extra'
838 options[b'copies-storage'] = b'extra'
839
839
840 return options
840 return options
841
841
842
842
843 def resolverevlogstorevfsoptions(ui, requirements, features):
843 def resolverevlogstorevfsoptions(ui, requirements, features):
844 """Resolve opener options specific to revlogs."""
844 """Resolve opener options specific to revlogs."""
845
845
846 options = {}
846 options = {}
847 options[b'flagprocessors'] = {}
847 options[b'flagprocessors'] = {}
848
848
849 if b'revlogv1' in requirements:
849 if b'revlogv1' in requirements:
850 options[b'revlogv1'] = True
850 options[b'revlogv1'] = True
851 if REVLOGV2_REQUIREMENT in requirements:
851 if REVLOGV2_REQUIREMENT in requirements:
852 options[b'revlogv2'] = True
852 options[b'revlogv2'] = True
853
853
854 if b'generaldelta' in requirements:
854 if b'generaldelta' in requirements:
855 options[b'generaldelta'] = True
    options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    sidedata = SIDEDATA_REQUIREMENT in requirements
    options[b'side-data'] = sidedata

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # We allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one"
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if ui.configbool(b'experimental', b'exp-persistent-nodemap'):
        options[b'exp-persistent-nodemap'] = True
    if ui.configbool(b'experimental', b'exp-persistent-nodemap.mmap'):
        options[b'exp-persistent-nodemap.mmap'] = True
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True

    return options
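
# Usage sketch (an illustrative addition, not part of the original module):
# the options mapping built above is attached to the store vfs when a
# repository is opened, so it can be inspected from a repo object. The helper
# name below is hypothetical.
def _example_dump_storage_options(repo):
    # ``repo.svfs.options`` holds the resolved revlog options at runtime.
    for key, value in sorted(repo.svfs.options.items()):
        repo.ui.write(b'%s: %s\n' % (key, pycompat.bytestr(value)))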


def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
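
# Sketch of how ``makelocalrepository()`` consumes this list (simplified and
# illustrative; the real call site passes many more keyword arguments):
#
#   bases = []
#   for iface, factoryfn in REPO_INTERFACES:
#       bases.append(factoryfn()(requirements=requirements, features=features))
#   cls = type('derivedrepo', tuple(bases), {})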


@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        b'revlogv1',
        b'generaldelta',
        b'treemanifest',
        COPIESSDC_REQUIREMENT,
        REVLOGV2_REQUIREMENT,
        SIDEDATA_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        b'store',
        b'fncache',
        b'shared',
        b'relshared',
        b'dotencode',
        b'exp-sparse',
        b'internal-phase',
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't the dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` defining the path to the storage base directory. Points
           to a ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            if path.startswith(b'journal.') or path.startswith(b'undo.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        return caps

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False
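
    # Usage sketch (illustrative): assuming ``sub`` is a subrepository in the
    # working copy, paths below it are accepted and anything outside
    # ``repo.root`` is rejected.
    #
    #   repo._checknested(repo.root + b'/sub/file.txt')  # True if legal
    #   repo._checknested(b'/elsewhere/file.txt')        # False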

    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
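
    # Usage sketch (illustrative): filtered views are how most read-only
    # consumers see the repository.
    #
    #   visible = repo.filtered(b'visible')  # hides hidden changesets
    #   served = repo.filtered(b'served')    # additionally hides secret ones
    #
    # Each call builds its view from the unfiltered repo; views do not nest.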

    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happen during read is not great, but it becomes worse
        # when it happens during write, because the bookmarks to the "unknown"
        # nodes will be dropped for good. However, writes happen within locks.
        # This locking makes it possible to have a race-free consistent read.
        # For this purpose, data read from disk before locking is
        # "invalidated" right after the locks are taken. These invalidations
        # are "light": the `filecache` mechanism keeps the data in memory and
        # will reuse it if the underlying files did not change. Not parsing
        # the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1)
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) we force the `changelog` filecache to be tested
        # 4) cachestat for `changelog` is captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as the
        # cache stat computed in (1). As a result, at locking time:
        #  * if the changelog did not change since (1) -> we can reuse the data
        #  * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache(b'00changelog.i')
    def changelog(self):
        # load dirstate before changelog to avoid a race (see issue6303)
        self.dirstate.prefetch_parents()
        return self.store.changelog(txnutil.mayhavepending(self.root))
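
    # Ordering sketch for the issue6303 fix above (illustrative): another
    # process can commit and move the working copy between two of our reads.
    # Prefetching the dirstate parents first means the changelog read
    # afterwards is at least as recent as the dirstate, so the working copy
    # parents can be resolved. The reverse order could pair an older
    # changelog with a newer dirstate whose parents it does not know.
    #
    #   cl = repo.changelog               # reads dirstate, then changelog
    #   p1, p2 = repo.dirstate.parents()  # parents are expected to be in cl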

    @storecache(b'00manifest.i')
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(
            self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
        )

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch
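
    # Usage sketch (illustrative): intersecting a caller-supplied matcher
    # with the narrowspec before walking files.
    #
    #   m = matchmod.match(repo.root, b'', [b'glob:src/**'])
    #   narrowed = repo.narrowmatch(m)  # ``m`` restricted to the narrowspec
    #   bare = repo.narrowmatch()       # the narrow matcher by itself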

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, nullid),
            nullrev: (nullrev, nullid),
            nullid: (nullrev, nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure the wc is visible.
        quick = {}
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        mapping = self._quick_access_changeid_null
        if self.filtername in repoview.filter_has_wc:
            mapping = mapping.copy()
            mapping.update(self._quick_access_changeid_wc)
        return mapping
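
    # Shape sketch (illustrative): for a repository whose working copy
    # parent is revision 5 with binary node ``n5``, the mapping may contain:
    #
    #   {b'null': (-1, nullid), -1: (-1, nullid), nullid: (-1, nullid),
    #    5: (5, n5), n5: (5, n5), b'.': (5, n5)}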

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)
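
    # Usage sketch (illustrative): the accepted keys mirror the branches
    # above.
    #
    #   repo[None]    # workingctx for the working directory
    #   repo[0]       # changectx for revision 0
    #   repo[b'.']    # first parent of the working directory
    #   repo[b'tip']  # tip-most changeset
    #   repo[node]    # a 20-byte binary node is resolved via the changelog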

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)
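
    # Usage sketch (illustrative): %-formatting keeps caller-provided values
    # out of the revset grammar.
    #
    #   repo.revs(b'ancestors(%d)', 42)            # %d takes a revision number
    #   repo.revs(b'%ln and not public()', nodes)  # %ln takes binary nodes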

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
1743
1745
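    # Illustrative sketch: ``localalias`` lets a caller shadow or supply a
    # revset alias without touching the configuration. The alias name here
    # is hypothetical:
    #
    #     revs = repo.anyrevs(
    #         [b'recentdrafts()'],
    #         user=True,
    #         localalias={b'recentdrafts': b'draft()'},
    #     )
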
    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

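    # Hedged example: an extension that registered a custom hook (the hook
    # name and argument below are hypothetical) could fire it like so:
    #
    #     repo.hook(b'myext-sync', throw=False, source=b'myext')
    #
    # Keyword arguments are exposed to external hooks as HG_* environment
    # variables.
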
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in pycompat.iteritems(tags):
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value
            for (name, value) in pycompat.iteritems(tagtypes)
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

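    # Sketch of the hgrc configuration these helpers consume (the patterns
    # and filter names are illustrative; names ending in ``:`` refer to data
    # filters registered via ``adddatafilter()``):
    #
    #     [encode]
    #     **.txt = dumbencode:
    #
    #     [decode]
    #     **.txt = dumbdecode:
    #
    # Entries whose command is ``!`` are skipped; a command that matches no
    # registered data filter is run as a shell command filter instead.
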
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup apply,
        # but that fails to cope with cases where a transaction exposes new
        # heads without a changegroup being involved (e.g. phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
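        #
        # Hedged sketch of a consumer (illustrative, not part of this
        # module): a txnclose hook could parse that file along these lines,
        # checking for the tag_moved flag first as documented above:
        #
        #     if b'tag_moved' in tr.hookargs:
        #         with repo.vfs(b'changes/tags.changes', b'rb') as fp:
        #             for line in fp:
        #                 action, hexnode, tagname = line.split(b' ', 2)
        #                 ...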
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            reporef()._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

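    # Illustrative calling convention (a sketch, assuming the returned
    # transaction is used as a context manager as elsewhere in Mercurial;
    # the description string is hypothetical):
    #
    #     with repo.lock():
    #         with repo.transaction(b'my-operation') as tr:
    #             ...  # mutate the store; the journal is rolled back on error
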
    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = {
                    b'': self.svfs,
                    b'plain': self.vfs,
                }
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergemod.mergestate.clean(self, self[b'.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)

        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes[b'origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug(b'updating the branch cache\n')
            self.filtered(b'served').branchmap()
            self.filtered(b'served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()

            self.changelog.update_caches(transaction=tr)

            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

            # accessing the fnode cache warms it
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warms the cache
            self.tags()
            self.filtered(b'served').tags()

            # The `full` arg is documented as updating even the lazily-loaded
            # caches immediately, so we're forcing a write to cause these
            # caches to be warmed up even if they haven't explicitly been
            # requested yet (if they've never been used by hg, they won't ever
            # have been written, even if they're a subset of another kind of
            # cache that *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)

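    # Hedged usage note: a caller outside the transaction machinery (say, a
    # hypothetical maintenance command) would warm everything with:
    #
    #     repo.updatecaches(full=True)
    #
    # while post-transaction callbacks pass ``tr`` so that only the caches
    # relevant to that transaction are refreshed.
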
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
        inheritchecker=None,
        parentenvvar=None,
    ):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            inheritchecker=inheritchecker,
            parentlock=parentlock,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)

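    # Usage sketch (illustrative only; `repo` and `_notify` are hypothetical
    # names, not part of this module): an extension that wants to run code
    # once every lock is released could register a callback like this:
    #
    #     def _notify(success):
    #         repo.ui.status(b'all locks released\n')
    #
    #     repo._afterlock(_notify)
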
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                b'wlock cannot be inherited in the middle of a transaction'
            )

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
            inheritchecker=self._wlockchecktransaction,
            parentenvvar=b'HG_WLOCK_LOCKER',
        )
        self._wlockref = weakref.ref(l)
        return l

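    # Ordering sketch (illustrative): callers that need both locks should take
    # the wlock first, as commit() below does, to avoid the dead-lock hazard
    # described in the docstrings above:
    #
    #     with repo.wlock(), repo.lock():
    #         ...  # mutate the working copy and the store
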
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(
        self,
        fctx,
        manifest1,
        manifest2,
        linkrev,
        tr,
        changelist,
        includecopymeta,
    ):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug(b'reusing %s filelog entry\n' % fname)
                if (
                    fparent1 != nullid
                    and manifest1.flags(fname) != fctx.flags()
                ) or (
                    fparent2 != nullid
                    and manifest2.flags(fname) != fctx.flags()
                ):
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        cfname = fctx.copysource()
        if cfname and cfname != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cnode = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2:  # branch merge
                if fparent2 == nullid or cnode is None:  # copied on remote side
                    if cfname in manifest2:
                        cnode = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if cnode:
                self.ui.debug(
                    b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))
                )
                if includecopymeta:
                    meta[b"copy"] = cfname
                    meta[b"copyrev"] = hex(cnode)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(
                    _(
                        b"warning: can't find ancestor for '%s' "
                        b"copied from '%s'!\n"
                    )
                    % (fname, cfname)
                )

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid
            elif not fparentancestors:
                # TODO: this whole if-else might be simplified much more
                ms = mergemod.mergestate.read(self)
                if (
                    fname in ms
                    and ms[fname] == mergemod.MERGE_RECORD_MERGED_OTHER
                ):
                    fparent1, fparent2 = fparent2, nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            allowemptycommit = (
                wctx.branch() != wctx.p1().branch()
                or extra.get(b'close')
                or merge
                or cctx.files()
                or self.ui.configbool(b'ui', b'allowemptycommit')
            )
            if not allowemptycommit:
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msgfn
                    )
                raise

        def commithook(unused_success):
            # hack for commands that use a temporary commit (eg: histedit);
            # the temporary commit may have been stripped before the hook runs
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        self._afterlock(commithook)
        return ret

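    # Usage sketch (illustrative; `repo` is a hypothetical localrepository
    # instance): commit() returns the new node, or None when there was
    # nothing to commit:
    #
    #     node = repo.commit(text=b'fix: frob the widget',
    #                        user=b'alice <alice@example.com>')
    #     if node is None:
    #         repo.ui.status(b'nothing changed\n')
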
    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().

        origctx is for convert to work around the problem that bug
        fixes to the files list in changesets change hashes. For
        convert to be the identity, it can pass an origctx and this
        function will use the same files list when it makes sense to
        do so.
        """

        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        if self.filecopiesmode == b'changeset-sidedata':
            writechangesetcopy = True
            writefilecopymeta = True
            writecopiesto = None
        else:
            writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
            writefilecopymeta = writecopiesto != b'changeset-only'
            writechangesetcopy = writecopiesto in (
                b'changeset-only',
                b'compatibility',
            )
        p1copies, p2copies = None, None
        if writechangesetcopy:
            p1copies = ctx.p1copies()
            p2copies = ctx.p2copies()
        filesadded, filesremoved = None, None
        with self.lock(), self.transaction(b"commit") as tr:
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug(b'reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
                if writechangesetcopy:
                    filesadded = ctx.filesadded()
                    filesremoved = ctx.filesremoved()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_(b"committing files:\n"))
                uipathfn = scmutil.getuipathfn(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(uipathfn(f) + b"\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(
                                fctx,
                                m1,
                                m2,
                                linkrev,
                                trp,
                                changed,
                                writefilecopymeta,
                            )
                            m.setflag(f, fctx.flags())
                    except OSError:
                        self.ui.warn(
                            _(b"trouble committing %s!\n") % uipathfn(f)
                        )
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(
                                _(b"trouble committing %s!\n") % uipathfn(f)
                            )
                        raise

                # update manifest
                removed = [f for f in removed if f in m1 or f in m2]
                drop = sorted([f for f in removed if f in m])
                for f in drop:
                    del m[f]
                if p2.rev() != nullrev:

                    @util.cachefunc
                    def mas():
                        p1n = p1.node()
                        p2n = p2.node()
                        cahs = self.changelog.commonancestorsheads(p1n, p2n)
                        if not cahs:
                            cahs = [nullrev]
                        return [self[r].manifest() for r in cahs]

                    def deletionfromparent(f):
                        # When a file is removed relative to p1 in a merge, this
                        # function determines whether the absence is due to a
                        # deletion from a parent, or whether the merge commit
                        # itself deletes the file. We decide this by doing a
                        # simplified three way merge of the manifest entry for
                        # the file. There are two ways we decide the merge
                        # itself didn't delete a file:
                        # - neither parent (nor the merge) contains the file
                        # - exactly one parent contains the file, and that
                        #   parent has the same filelog entry as the merge
                        #   ancestor (or all of them if there are two). In other
                        #   words, that parent left the file unchanged while the
                        #   other one deleted it.
                        # One way to think about this is that deleting a file is
                        # similar to emptying it, so the list of changed files
                        # should be similar either way. The computation
                        # described above is not done directly in _filecommit
                        # when creating the list of changed files, however
                        # it does something very similar by comparing filelog
                        # nodes.
                        if f in m1:
                            return f not in m2 and all(
                                f in ma and ma.find(f) == m1.find(f)
                                for ma in mas()
                            )
                        elif f in m2:
                            return all(
                                f in ma and ma.find(f) == m2.find(f)
                                for ma in mas()
                            )
                        else:
                            return True

                    removed = [f for f in removed if not deletionfromparent(f)]

                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug(
                        b'not reusing manifest (no file change in '
                        b'changelog, but manifest differs)\n'
                    )
                if files or md:
                    self.ui.note(_(b"committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(
                        trp,
                        linkrev,
                        p1.manifestnode(),
                        p2.manifestnode(),
                        added,
                        drop,
                        match=self.narrowmatch(),
                    )

                    if writechangesetcopy:
                        filesadded = [
                            f for f in changed if not (f in m1 or f in m2)
                        ]
                        filesremoved = removed
                else:
                    self.ui.debug(
                        b'reusing manifest from p1 (listed files '
                        b'actually unchanged)\n'
                    )
                    mn = p1.manifestnode()
            else:
                self.ui.debug(b'reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            if writecopiesto == b'changeset-only':
                # If writing only to changeset extras, use None to indicate that
                # no entry should be written. If writing to both, write an empty
                # entry to prevent the reader from falling back to reading
                # filelogs.
                p1copies = p1copies or None
                p2copies = p2copies or None
                filesadded = filesadded or None
                filesremoved = filesremoved or None

            if origctx and origctx.manifestnode() == mn:
                files = origctx.files()

            # update changelog
            self.ui.note(_(b"committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(
                mn,
                files,
                ctx.description(),
                trp,
                p1.node(),
                p2.node(),
                user,
                ctx.date(),
                ctx.extra().copy(),
                p1copies,
                p2copies,
                filesadded,
                filesremoved,
            )
            xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
            self.hook(
                b'pretxncommit',
                throw=True,
                node=hex(n),
                parent1=xp1,
                parent2=xp2,
            )
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            return n

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

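    # Callback sketch (illustrative; `repo` and `poststatus` are hypothetical
    # names): since the list is emptied after each status run, an extension
    # must re-register its hook before every status call:
    #
    #     def poststatus(wctx, status):
    #         wctx.repo().ui.debug(b'%d file(s) modified\n'
    #                              % len(status.modified))
    #
    #     repo.addpostdsstatus(poststatus)
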
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

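    # Example (illustrative; `repo` is a hypothetical localrepository): heads
    # of the 'default' branch, newest first, including heads marked as closed:
    #
    #     heads = repo.branchheads(b'default', closed=True)
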
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        """For each (top, bottom) pair, return the nodes found by walking
        first parents from top towards bottom, sampled at exponentially
        increasing distances (1, 2, 4, 8, ...) from top."""
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

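    # Illustration (hypothetical history): with a linear chain of revisions
    # 0..100 and the single pair (node(100), node(0)), the returned sample is
    # the nodes at revisions 99, 98, 96, 92, 84, 68, 36 -- each twice as far
    # from the top as the previous one.
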
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose hooks are called with a pushop
        (carrying repo, remote, and outgoing attributes) before changesets
        are pushed.
        """
        return util.hooks()

3447 def pushkey(self, namespace, key, old, new):
3449 def pushkey(self, namespace, key, old, new):
3448 try:
3450 try:
3449 tr = self.currenttransaction()
3451 tr = self.currenttransaction()
3450 hookargs = {}
3452 hookargs = {}
3451 if tr is not None:
3453 if tr is not None:
3452 hookargs.update(tr.hookargs)
3454 hookargs.update(tr.hookargs)
3453 hookargs = pycompat.strkwargs(hookargs)
3455 hookargs = pycompat.strkwargs(hookargs)
3454 hookargs['namespace'] = namespace
3456 hookargs['namespace'] = namespace
3455 hookargs['key'] = key
3457 hookargs['key'] = key
3456 hookargs['old'] = old
3458 hookargs['old'] = old
3457 hookargs['new'] = new
3459 hookargs['new'] = new
3458 self.hook(b'prepushkey', throw=True, **hookargs)
3460 self.hook(b'prepushkey', throw=True, **hookargs)
3459 except error.HookAbort as exc:
3461 except error.HookAbort as exc:
3460 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3462 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3461 if exc.hint:
3463 if exc.hint:
3462 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3464 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3463 return False
3465 return False
3464 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3466 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3465 ret = pushkey.push(self, namespace, key, old, new)
3467 ret = pushkey.push(self, namespace, key, old, new)
3466
3468
3467 def runhook(unused_success):
3469 def runhook(unused_success):
3468 self.hook(
3470 self.hook(
3469 b'pushkey',
3471 b'pushkey',
3470 namespace=namespace,
3472 namespace=namespace,
3471 key=key,
3473 key=key,
3472 old=old,
3474 old=old,
3473 new=new,
3475 new=new,
3474 ret=ret,
3476 ret=ret,
3475 )
3477 )
3476
3478
3477 self._afterlock(runhook)
3479 self._afterlock(runhook)
3478 return ret
3480 return ret
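    # Hook ordering: ``prepushkey`` runs first and can veto the update (a
    # HookAbort is turned into a False return value), while the ``pushkey``
    # hook is deferred through _afterlock() so it only fires after the
    # relevant lock has been released.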

    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])
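    # The message lands in .hg/last-message.txt, so a commit that is
    # aborted later (e.g. by a pretxncommit hook) does not lose the text
    # the user already wrote; callers can mention the returned path when
    # reporting such a failure.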


# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename cannot be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:  # journal file does not yet exist
                pass

    return a


def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))
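# For example, undoname(b'.hg/store/journal.phaseroots') returns
# b'.hg/store/undo.phaseroots'; only the first b'journal' in the basename
# is rewritten, which is why the assert above insists on that prefix.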


def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)


def islocal(path):
    return True
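# instance() and islocal() form the module-level API that mercurial.hg
# uses to open a repository for a given URL scheme; plain filesystem
# paths are routed to this module, so islocal() can unconditionally
# return True here.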


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if b'sharedrepo' in createopts:
        requirements = set(createopts[b'sharedrepo'].requirements)
        if createopts.get(b'sharedrelative'):
            requirements.add(b'relshared')
        else:
            requirements.add(b'shared')

        return requirements

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {b'revlogv1'}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(b'store')
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(b'fncache')
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(b'dotencode')

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(b'generaldelta')
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.exp-use-side-data
    if ui.configbool(b'format', b'exp-use-side-data'):
        requirements.add(SIDEDATA_REQUIREMENT)
    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(SIDEDATA_REQUIREMENT)
        requirements.add(COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(b'treemanifest')

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.remove(b'revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard(b'generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'internal-phase'):
        requirements.add(b'internal-phase')

    if createopts.get(b'narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    return requirements
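# With a stock configuration the returned set is roughly {b'revlogv1',
# b'store', b'fncache', b'dotencode', b'generaldelta', b'sparserevlog'};
# the requires file written from it is what keeps older clients away from
# repository formats they cannot read.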


def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}
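# As an illustration, filterknowncreateopts(ui, {b'lfs': True,
# b'frobnicate': 1}) returns {b'frobnicate': 1} (a made-up option name),
# which createrepository() below rejects as an unknown creation option.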


def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    lfs
       Repository will be created with ``lfs`` requirement. The lfs extension
       will automatically be loaded when the repository is accessed.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    shallowfilestore
       Indicates that storage for files should be shallow (not all ancestor
       revisions are known).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError(
            b'filterknowncreateopts() did not return a dict'
        )

    if unknownopts:
        raise error.Abort(
            _(
                b'unable to create repository because of unknown '
                b'creation option: %s'
            )
            % b', '.join(sorted(unknownopts)),
            hint=_(b'is a required extension not loaded?'),
        )

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    if b'store' in requirements and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\0\2 dummy changelog to prevent using the old repo '
            b'layout',
        )

    scmutil.writerequires(hgvfs, requirements)

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)


def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one where
    # every attribute lookup results in an error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)
@@ -1,294 +1,299 b''
#require no-reposimplestore

  $ hg clone http://localhost:$HGPORT/ copy
  abort: * (glob)
  [255]
  $ test -d copy
  [1]

This server doesn't do range requests, so it's basically only good for
one pull

  $ "$PYTHON" "$TESTDIR/dumbhttp.py" -p $HGPORT --pid dumb.pid \
  > --logfile server.log
  $ cat dumb.pid >> $DAEMON_PIDS
  $ hg init remote
  $ cd remote
  $ echo foo > bar
  $ echo c2 > '.dotfile with spaces'
  $ hg add
  adding .dotfile with spaces
  adding bar
  $ hg commit -m"test"
  $ hg tip
  changeset:   0:02770d679fb8
  tag:         tip
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     test

  $ cd ..
  $ hg clone static-http://localhost:$HGPORT/remote local
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 2 changes to 2 files
  new changesets 02770d679fb8
  updating to branch default
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ cd local
  $ hg verify
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  checked 1 changesets with 2 changes to 2 files
  $ cat bar
  foo
  $ cd ../remote
  $ echo baz > quux
  $ hg commit -A -mtest2
  adding quux

check for HTTP opener failures when cachefile does not exist

  $ rm .hg/cache/*
  $ cd ../local
  $ cat >> .hg/hgrc <<EOF
  > [hooks]
  > changegroup = sh -c "printenv.py --line changegroup"
  > EOF
  $ hg pull
  pulling from static-http://localhost:$HGPORT/remote
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files
  new changesets 4ac2e3648604
  changegroup hook: HG_HOOKNAME=changegroup
  HG_HOOKTYPE=changegroup
  HG_NODE=4ac2e3648604439c580c69b09ec9d93a88d93432
  HG_NODE_LAST=4ac2e3648604439c580c69b09ec9d93a88d93432
  HG_SOURCE=pull
  HG_TXNID=TXN:$ID$
  HG_TXNNAME=pull
  http://localhost:$HGPORT/remote
  HG_URL=http://localhost:$HGPORT/remote

  (run 'hg update' to get a working copy)

trying to push

  $ hg update
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ echo more foo >> bar
  $ hg commit -m"test"
  $ hg push
  pushing to static-http://localhost:$HGPORT/remote
  abort: destination does not support push
  [255]

trying clone -r

  $ cd ..
  $ hg clone -r doesnotexist static-http://localhost:$HGPORT/remote local0
  abort: unknown revision 'doesnotexist'!
  [255]
  $ hg clone -r 0 static-http://localhost:$HGPORT/remote local0
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 2 changes to 2 files
  new changesets 02770d679fb8
  updating to branch default
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved

test with "/" URI (issue747) and subrepo

  $ hg init
  $ hg init sub
  $ touch sub/test
  $ hg -R sub commit -A -m "test"
  adding test
  $ hg -R sub tag not-empty
  $ echo sub=sub > .hgsub
  $ echo a > a
  $ hg add a .hgsub
  $ hg -q ci -ma
  $ hg clone static-http://localhost:$HGPORT/ local2
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 3 changes to 3 files
  new changesets a9ebfbe8e587
  updating to branch default
  cloning subrepo sub from static-http://localhost:$HGPORT/sub
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 2 changesets with 2 changes to 2 files
  new changesets be090ea66256:322ea90975df
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ cd local2
  $ hg verify
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  checked 1 changesets with 3 changes to 3 files
  checking subrepo links
  $ cat a
  a
  $ hg paths
  default = static-http://localhost:$HGPORT/

test with empty repo (issue965)

  $ cd ..
  $ hg init remotempty
  $ hg clone static-http://localhost:$HGPORT/remotempty local3
  no changes found
  updating to branch default
  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ cd local3
  $ hg verify
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  checked 0 changesets with 0 changes to 0 files
  $ hg paths
  default = static-http://localhost:$HGPORT/remotempty

test with non-repo

  $ cd ..
  $ mkdir notarepo
  $ hg clone static-http://localhost:$HGPORT/notarepo local3
  abort: 'http://localhost:$HGPORT/notarepo' does not appear to be an hg repository!
  [255]

Clone with tags and branches works

  $ hg init remote-with-names
  $ cd remote-with-names
  $ echo 0 > foo
  $ hg -q commit -A -m initial
  $ echo 1 > foo
  $ hg commit -m 'commit 1'
  $ hg -q up 0
  $ hg branch mybranch
  marked working directory as branch mybranch
  (branches are permanent and global, did you want a bookmark?)
  $ echo 2 > foo
  $ hg commit -m 'commit 2 (mybranch)'
  $ hg tag -r 1 'default-tag'
  $ hg tag -r 2 'branch-tag'

  $ cd ..

  $ hg clone static-http://localhost:$HGPORT/remote-with-names local-with-names
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 5 changesets with 5 changes to 2 files (+1 heads)
  new changesets 68986213bd44:0c325bd2b5a7
  updating to branch default
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved

Clone a specific branch works

  $ hg clone -r mybranch static-http://localhost:$HGPORT/remote-with-names local-with-names-branch
  adding changesets
  adding manifests
  adding file changes
  added 4 changesets with 4 changes to 2 files
  new changesets 68986213bd44:0c325bd2b5a7
  updating to branch mybranch
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved

Clone a specific tag works

  $ hg clone -r default-tag static-http://localhost:$HGPORT/remote-with-names local-with-names-tag
  adding changesets
  adding manifests
  adding file changes
  added 2 changesets with 2 changes to 1 files
  new changesets 68986213bd44:4ee3fcef1c80
  updating to branch default
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved

  $ killdaemons.py

List of files accessed over HTTP:

  $ cat server.log | sed -n -e 's|.*GET \(/[^ ]*\).*|\1|p' | sort -u
  /.hg/bookmarks
  /.hg/bookmarks.current
  /.hg/cache/hgtagsfnodes1
  /.hg/cache/rbc-names-v1
  /.hg/cache/rbc-revs-v1
+ /.hg/dirstate
  /.hg/requires
  /.hg/store/00changelog.i
  /.hg/store/00manifest.i
  /.hg/store/data/%7E2ehgsub.i (no-py37 !)
  /.hg/store/data/%7E2ehgsubstate.i (no-py37 !)
  /.hg/store/data/a.i
  /.hg/store/data/~2ehgsub.i (py37 !)
  /.hg/store/data/~2ehgsubstate.i (py37 !)
  /notarepo/.hg/00changelog.i
  /notarepo/.hg/requires
  /remote-with-names/.hg/bookmarks
  /remote-with-names/.hg/bookmarks.current
  /remote-with-names/.hg/cache/branch2-served
  /remote-with-names/.hg/cache/hgtagsfnodes1
  /remote-with-names/.hg/cache/rbc-names-v1
  /remote-with-names/.hg/cache/rbc-revs-v1
  /remote-with-names/.hg/cache/tags2-served
+ /remote-with-names/.hg/dirstate
  /remote-with-names/.hg/localtags
  /remote-with-names/.hg/requires
  /remote-with-names/.hg/store/00changelog.i
  /remote-with-names/.hg/store/00manifest.i
  /remote-with-names/.hg/store/data/%7E2ehgtags.i (no-py37 !)
  /remote-with-names/.hg/store/data/foo.i
  /remote-with-names/.hg/store/data/~2ehgtags.i (py37 !)
  /remote/.hg/bookmarks
  /remote/.hg/bookmarks.current
  /remote/.hg/cache/branch2-base
  /remote/.hg/cache/branch2-immutable
  /remote/.hg/cache/branch2-served
  /remote/.hg/cache/hgtagsfnodes1
  /remote/.hg/cache/rbc-names-v1
  /remote/.hg/cache/rbc-revs-v1
  /remote/.hg/cache/tags2-served
+ /remote/.hg/dirstate
  /remote/.hg/localtags
  /remote/.hg/requires
  /remote/.hg/store/00changelog.i
  /remote/.hg/store/00manifest.i
  /remote/.hg/store/data/%7E2edotfile%20with%20spaces.i (no-py37 !)
  /remote/.hg/store/data/%7E2ehgtags.i (no-py37 !)
  /remote/.hg/store/data/bar.i
  /remote/.hg/store/data/quux.i
  /remote/.hg/store/data/~2edotfile%20with%20spaces.i (py37 !)
  /remote/.hg/store/data/~2ehgtags.i (py37 !)
  /remotempty/.hg/bookmarks
  /remotempty/.hg/bookmarks.current
+ /remotempty/.hg/dirstate
  /remotempty/.hg/requires
  /remotempty/.hg/store/00changelog.i
  /remotempty/.hg/store/00manifest.i
  /sub/.hg/bookmarks
  /sub/.hg/bookmarks.current
  /sub/.hg/cache/hgtagsfnodes1
  /sub/.hg/cache/rbc-names-v1
  /sub/.hg/cache/rbc-revs-v1
+ /sub/.hg/dirstate
  /sub/.hg/requires
  /sub/.hg/store/00changelog.i
  /sub/.hg/store/00manifest.i
  /sub/.hg/store/data/%7E2ehgtags.i (no-py37 !)
  /sub/.hg/store/data/test.i
  /sub/.hg/store/data/~2ehgtags.i (py37 !)