dirstate: replace `update_parent_file_data` with simpler `update_parent` call...
marmoute
r48491:33beeb32 default
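The diff below lets `update_file` accept fresh stat data directly (a new `parentfiledata` argument forwarded to `normal()`), which makes the separate `update_parent_file_data` method redundant. One plausible before/after sketch of a caller (the caller and its flags are illustrative, not taken from this changeset):

    # before: two calls inside the parent-change context
    with repo.dirstate.parentchange():
        repo.dirstate.update_file(f, wc_tracked=True, p1_tracked=True)
        repo.dirstate.update_parent_file_data(f, filedata)

    # after: a single update_file call carrying the (mode, size, mtime) tuple
    with repo.dirstate.parentchange():
        repo.dirstate.update_file(
            f, wc_tracked=True, p1_tracked=True, parentfiledata=filedata
        )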
@@ -1,1631 +1,1624 @@
# dirstate.py - working directory tracking for mercurial
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import contextlib
import errno
import os
import stat

from .i18n import _
from .pycompat import delattr

from hgdemandimport import tracing

from . import (
    dirstatemap,
    encoding,
    error,
    match as matchmod,
    pathutil,
    policy,
    pycompat,
    scmutil,
    sparse,
    util,
)

from .interfaces import (
    dirstate as intdirstate,
    util as interfaceutil,
)

parsers = policy.importmod('parsers')
rustmod = policy.importrust('dirstate')

SUPPORTS_DIRSTATE_V2 = rustmod is not None

propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = dirstatemap.rangemask

DirstateItem = parsers.DirstateItem


class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        return obj._opener.join(fname)


class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        return obj._join(fname)


def _getfsnow(vfs):
    '''Get "now" timestamp on filesystem'''
    tmpfd, tmpname = vfs.mkstemp()
    try:
        return os.fstat(tmpfd)[stat.ST_MTIME]
    finally:
        os.close(tmpfd)
        vfs.unlink(tmpname)


def requires_parents_change(func):
    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            msg = 'calling `%s` outside of a parentchange context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap


def requires_no_parents_change(func):
    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            msg = 'calling `%s` inside of a parentchange context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap


@interfaceutil.implementer(intdirstate.idirstate)
class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        self._dirty = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        self._origpl = None
        self._updatedfiles = set()
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd

    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        self._pl

    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1

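A minimal usage sketch for this context manager (not part of this changeset; the `repo` object and `newnode` are assumed): parent-moving operations wrap both the `setparents` call and the per-file updates in the context, since `setparents` raises outside of it:

    with repo.dirstate.parentchange():
        repo.dirstate.setparents(newnode)  # implicitly p2 = nullid
        # ... per-file dirstate updates go here ...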
    def pendingparentchange(self):
        """Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        """
        return self._parentwriters > 0

    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map

    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()

    @repocache(b'branch')
    def _branch(self):
        try:
            return self._opener.read(b"branch").strip() or b"default"
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            return b"default"

    @property
    def _pl(self):
        return self._map.parents()

    def hasdir(self, d):
        return self._map.hastrackeddir(d)

    @rootcache(b'.hgignore')
    def _ignore(self):
        files = self._ignorefiles()
        if not files:
            return matchmod.never()

        pats = [b'include:%s' % f for f in files]
        return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)

    @propertycache
    def _slash(self):
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'

    @propertycache
    def _checklink(self):
        return util.checklink(self._root)

    @propertycache
    def _checkexec(self):
        return bool(util.checkexec(self._root))

    @propertycache
    def _checkcase(self):
        return not util.fscasesensitive(self._join(b'.hg'))

    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f

    def flagfunc(self, buildfallback):
        if self._checklink and self._checkexec:

            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    pass
                return b''

            return f

        fallback = buildfallback()
        if self._checklink:

            def f(x):
                if os.path.islink(self._join(x)):
                    return b'l'
                if b'x' in fallback(x):
                    return b'x'
                return b''

            return f
        if self._checkexec:

            def f(x):
                if b'l' in fallback(x):
                    return b'l'
                if util.isexec(self._join(x)):
                    return b'x'
                return b''

            return f
        else:
            return fallback

    @propertycache
    def _cwd(self):
        # internal config: ui.forcecwd
        forcecwd = self._ui.config(b'ui', b'forcecwd')
        if forcecwd:
            return forcecwd
        return encoding.getcwd()

    def getcwd(self):
        """Return the path from which a canonical path is calculated.

        This path should be used to resolve file patterns or to convert
        canonical paths back to file paths for display. It shouldn't be
        used to get real file paths. Use vfs functions instead.
        """
        cwd = self._cwd
        if cwd == self._root:
            return b''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += pycompat.ossep
        if cwd.startswith(rootsep):
            return cwd[len(rootsep) :]
        else:
            # we're outside the repo. return an absolute path.
            return cwd

    def pathto(self, f, cwd=None):
        if cwd is None:
            cwd = self.getcwd()
        path = util.pathto(self._root, cwd, f)
        if self._slash:
            return util.pconvert(path)
        return path

    def __getitem__(self, key):
        """Return the current state of key (a filename) in the dirstate.

        States are:
          n  normal
          m  needs merging
          r  marked for removal
          a  marked for addition
          ?  not tracked

        XXX The "state" is a bit obscure to be in the "public" API. We should
        consider migrating all users of this to go through the dirstate entry
        instead.
        """
        entry = self._map.get(key)
        if entry is not None:
            return entry.state
        return b'?'

    def __contains__(self, key):
        return key in self._map

    def __iter__(self):
        return iter(sorted(self._map))

    def items(self):
        return pycompat.iteritems(self._map)

    iteritems = items

    def directories(self):
        return self._map.directories()

    def parents(self):
        return [self._validate(p) for p in self._pl]

    def p1(self):
        return self._validate(self._pl[0])

    def p2(self):
        return self._validate(self._pl[1])

    @property
    def in_merge(self):
        """True if a merge is in progress"""
        return self._pl[1] != self._nodeconstants.nullid

    def branch(self):
        return encoding.tolocal(self._branch)

    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal, and previous copy records are discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(p1, p2)
        copies = {}
        if (
            oldp2 != self._nodeconstants.nullid
            and p2 == self._nodeconstants.nullid
        ):
            candidatefiles = self._map.non_normal_or_other_parent_paths()

            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard "merged" markers when moving away from a merge state
                if s.merged:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.normallookup(f)
                # Also fix up otherparent markers
                elif s.from_p2:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self._add(f)
        return copies

    def setbranch(self, branch):
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except: # re-raises
            f.discard()
            raise

    def invalidate(self):
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

        for a in ("_map", "_branch", "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._lastnormaltime = 0
        self._dirty = False
        self._updatedfiles.clear()
        self._parentwriters = 0
        self._origpl = None

    def copy(self, source, dest):
        """Mark dest as a copy of source. Unmark dest if source is None."""
        if source == dest:
            return
        self._dirty = True
        if source is not None:
            self._map.copymap[dest] = source
            self._updatedfiles.add(source)
            self._updatedfiles.add(dest)
        elif self._map.copymap.pop(dest, None):
            self._updatedfiles.add(dest)

    def copied(self, file):
        return self._map.copymap.get(file, None)

    def copies(self):
        return self._map.copymap

    @requires_no_parents_change
    def set_tracked(self, filename):
        """a "public" method for generic code to mark a file as tracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg add X`.

        return True if the file was previously untracked, False otherwise.
        """
        entry = self._map.get(filename)
        if entry is None:
            self._add(filename)
            return True
        elif not entry.tracked:
            self.normallookup(filename)
            return True
        return False

    @requires_no_parents_change
    def set_untracked(self, filename):
        """a "public" method for generic code to mark a file as untracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg remove X`.

        return True if the file was previously tracked, False otherwise.
        """
        entry = self._map.get(filename)
        if entry is None:
            return False
        elif entry.added:
            self._drop(filename)
            return True
        else:
            self._remove(filename)
            return True

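A sketch of the intended calling pattern from generic command code (not part of this changeset; `repo`, `ui` and `files_to_add` are assumed):

    # `hg add`-like flow; the return value reports whether anything changed
    for f in files_to_add:
        if not repo.dirstate.set_tracked(f):
            ui.warn(b'%s is already tracked\n' % f)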
    @requires_parents_change
    def update_file_reference(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjusting the dirstate to a new parent
        after a history rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.parentchange():` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        if p1_tracked and wc_tracked:
            # the underlying reference might have changed, we will have to
            # check it.
            self.normallookup(filename)
        elif not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            self._drop(filename)
        elif (not p1_tracked) and wc_tracked:
            if not entry.added:
                self._add(filename)
        elif p1_tracked and not wc_tracked:
            if entry is None or not entry.removed:
                self._remove(filename)
        else:
            assert False, 'unreachable'

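For example, a history-rewriting operation moving to a rewritten parent might fix up each touched file as below (a sketch; `rewritten_node`, `touched_files` and `new_manifest` are assumed names, not from this changeset):

    with repo.dirstate.parentchange():
        repo.dirstate.setparents(rewritten_node)
        for f in touched_files:
            repo.dirstate.update_file_reference(f, p1_tracked=f in new_manifest)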
    @requires_parents_change
    def update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_tracked=False,
        merged=False,
        clean_p1=False,
        clean_p2=False,
        possibly_dirty=False,
+        parentfiledata=None,
    ):
        """update the information about a file in the dirstate

        This is to be called when the dirstate's parent changes, to keep
        track of the file's situation with regard to the working copy and
        its parent.

        This function must be called within a `dirstate.parentchange` context.

        note: the API is at an early stage and we might need to adjust it
        depending on what information ends up being relevant and useful to
        other processing.
        """
        if merged and (clean_p1 or clean_p2):
            msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
            raise error.ProgrammingError(msg)
        if not (p1_tracked or p2_tracked or wc_tracked):
            self._drop(filename)
        elif merged:
            assert wc_tracked
            assert self.in_merge # we are never in the "normallookup" case
            self.otherparent(filename)
        elif not (p1_tracked or p2_tracked) and wc_tracked:
            self._addpath(filename, added=True, possibly_dirty=possibly_dirty)
            self._map.copymap.pop(filename, None)
        elif (p1_tracked or p2_tracked) and not wc_tracked:
            self._remove(filename)
        elif clean_p2 and wc_tracked:
            assert p2_tracked
            self.otherparent(filename)
        elif not p1_tracked and p2_tracked and wc_tracked:
            self._addpath(filename, from_p2=True, possibly_dirty=possibly_dirty)
            self._map.copymap.pop(filename, None)
        elif possibly_dirty:
            self._addpath(filename, possibly_dirty=possibly_dirty)
        elif wc_tracked:
-            self.normal(filename)
+            self.normal(filename, parentfiledata=parentfiledata)
        # XXX We need something for files that are dirty after an update
        else:
            assert False, 'unreachable'

-    @requires_parents_change
-    def update_parent_file_data(self, f, filedata):
-        """update the information about the content of a file
-
-        This function should be called within a `dirstate.parentchange` context.
-        """
-        self.normal(f, parentfiledata=filedata)
-
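With the new argument, a caller that already holds fresh stat information for a clean file passes it in the same call (sketch; `mode`, `size` and `mtime` stand for the values captured when the file was written):

    with repo.dirstate.parentchange():
        repo.dirstate.update_file(
            f,
            wc_tracked=True,
            p1_tracked=True,
            parentfiledata=(mode, size, mtime),
        )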
    def _addpath(
        self,
        f,
        mode=0,
        size=None,
        mtime=None,
        added=False,
        merged=False,
        from_p2=False,
        possibly_dirty=False,
    ):
        entry = self._map.get(f)
        if added or entry is not None and entry.removed:
            scmutil.checkfilename(f)
            if self._map.hastrackeddir(f):
                msg = _(b'directory %r already in dirstate')
                msg %= pycompat.bytestr(f)
                raise error.Abort(msg)
            # shadows
            for d in pathutil.finddirs(f):
                if self._map.hastrackeddir(d):
                    break
                entry = self._map.get(d)
                if entry is not None and not entry.removed:
                    msg = _(b'file %r in dirstate clashes with %r')
                    msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
                    raise error.Abort(msg)
        self._dirty = True
        self._updatedfiles.add(f)
        self._map.addfile(
            f,
            mode=mode,
            size=size,
            mtime=mtime,
            added=added,
            merged=merged,
            from_p2=from_p2,
            possibly_dirty=possibly_dirty,
        )

    def _get_filedata(self, filename):
        """return a (mode, size, mtime) tuple from an lstat of filename"""
        s = os.lstat(self._join(filename))
        mode = s.st_mode
        size = s.st_size
        mtime = s[stat.ST_MTIME]
        return (mode, size, mtime)

    def normal(self, f, parentfiledata=None):
        """Mark a file normal and clean.

        parentfiledata: (mode, size, mtime) of the clean file

        parentfiledata should be computed from memory (for mode,
        size), or as close as possible to the point where we
        determined the file was clean, to limit the risk of the
        file having been changed by an external process between the
        moment where the file was determined to be clean and now."""
        if parentfiledata:
            (mode, size, mtime) = parentfiledata
        else:
            (mode, size, mtime) = self._get_filedata(f)
        self._addpath(f, mode=mode, size=size, mtime=mtime)
        self._map.copymap.pop(f, None)
        if f in self._map.nonnormalset:
            self._map.nonnormalset.remove(f)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime

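The expected tuple has the same shape as what `_get_filedata` computes, i.e. roughly (a sketch using a plain lstat; the path handling is simplified and `full_path_of_f` is an assumed name):

    import os
    import stat

    s = os.lstat(full_path_of_f)
    parentfiledata = (s.st_mode, s.st_size, s[stat.ST_MTIME])
    repo.dirstate.normal(f, parentfiledata=parentfiledata)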
    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self.in_merge:
            # if there is a merge going on and the file was either
            # "merged" or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map.get(f)
            if entry is not None:
                # XXX this should probably be dealt with at a lower level
                # (see `merged_removed` and `from_p2_removed`)
                if entry.merged_removed or entry.from_p2_removed:
                    source = self._map.copymap.get(f)
                    if entry.merged_removed:
                        self.merge(f)
                    elif entry.from_p2_removed:
                        self.otherparent(f)
                    if source is not None:
                        self.copy(source, f)
                    return
                elif entry.merged or entry.from_p2:
                    return
        self._addpath(f, possibly_dirty=True)
        self._map.copymap.pop(f, None)

    def otherparent(self, f):
        '''Mark as coming from the other parent, always dirty.'''
        if not self.in_merge:
            msg = _(b"setting %r to other parent only allowed in merges") % f
            raise error.Abort(msg)
        entry = self._map.get(f)
        if entry is not None and entry.tracked:
            # merge-like
            self._addpath(f, merged=True)
        else:
            # add-like
            self._addpath(f, from_p2=True)
        self._map.copymap.pop(f, None)

    def add(self, f):
        '''Mark a file added.'''
        if not self.pendingparentchange():
            util.nouideprecwarn(
                b"do not use `add` outside of update/merge context."
                b" Use `set_tracked`",
                b'6.0',
                stacklevel=2,
            )
        self._add(f)

    def _add(self, filename):
        """internal function to mark a file as added"""
        self._addpath(filename, added=True)
        self._map.copymap.pop(filename, None)

    def remove(self, f):
        '''Mark a file removed'''
        if not self.pendingparentchange():
            util.nouideprecwarn(
                b"do not use `remove` outside of update/merge context."
                b" Use `set_untracked`",
                b'6.0',
                stacklevel=2,
            )
        self._remove(f)

    def _remove(self, filename):
        """internal function to mark a file removed"""
        self._dirty = True
        self._updatedfiles.add(filename)
        self._map.removefile(filename, in_merge=self.in_merge)

    def merge(self, f):
        '''Mark a file merged.'''
        if not self.in_merge:
            return self.normallookup(f)
        return self.otherparent(f)

    def drop(self, f):
        '''Drop a file from the dirstate'''
        if not self.pendingparentchange():
            util.nouideprecwarn(
                b"do not use `drop` outside of update/merge context."
                b" Use `set_untracked`",
                b'6.0',
                stacklevel=2,
            )
        self._drop(f)

    def _drop(self, filename):
        """internal function to drop a file from the dirstate"""
        if self._map.dropfile(filename):
            self._dirty = True
            self._updatedfiles.add(filename)
            self._map.copymap.pop(filename, None)

    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            storemap[normed] = folded

        return folded

    def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.filefoldmap
                )
        return folded

    def _normalize(self, path, isknown, ignoremissing=False, exists=None):
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            folded = self._map.dirfoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                # store discovered result in dirfoldmap so that future
                # normalizefile calls don't start matching directories
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.dirfoldmap
                )
        return folded

    def normalize(self, path, isknown=False, ignoremissing=False):
        """
        normalize the case of a pathname when on a casefolding filesystem

        isknown specifies whether the filename came from walking the
        disk, to avoid extra filesystem access.

        If ignoremissing is True, missing paths are returned
        unchanged. Otherwise, we try harder to normalize possibly
        existing path components.

        The normalized case is determined based on the following precedence:

        - version of name already stored in the dirstate
        - version of name stored on disk
        - version provided via command arguments
        """

        if self._checkcase:
            return self._normalize(path, isknown, ignoremissing)
        return path

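On a case-folding filesystem this maps user-typed paths back to the stored case, e.g. (illustrative values, assuming `_checkcase` is true for this repository):

    # with b'Foo.TXT' tracked in the dirstate:
    repo.dirstate.normalize(b'foo.txt')  # -> b'Foo.TXT'
    # an unknown, non-existing path comes back unchanged:
    repo.dirstate.normalize(b'no-such-file.txt')  # -> b'no-such-file.txt'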
    def clear(self):
        self._map.clear()
        self._lastnormaltime = 0
        self._updatedfiles.clear()
        self._dirty = True

    def rebuild(self, parent, allfiles, changedfiles=None):
        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            lastnormaltime = self._lastnormaltime
            self.clear()
            self._lastnormaltime = lastnormaltime
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:
            self.normallookup(f)
        for f in to_drop:
            self._drop(f)

        self._dirty = True

    def identity(self):
        """Return identity of dirstate itself to detect changes in storage

        If the identity of the previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        """
        return self._map.identity

    def write(self, tr):
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamps.
            # delayed writing re-raises the "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            self._map.clearambiguoustimes(self._updatedfiles, now)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )
            return

        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(tr, st)

    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._plchangecallbacks[category] = callback

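A callback sketch matching the documented signature (the category name is a placeholder):

    def on_parent_move(dirstate, oldparents, newparents):
        # oldparents/newparents are (p1, p2) tuples of binary node ids
        pass

    repo.dirstate.addparentchangecallback(b'my-extension', on_parent_move)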
933 def _writedirstate(self, tr, st):
926 def _writedirstate(self, tr, st):
934 # notify callbacks about parents change
927 # notify callbacks about parents change
935 if self._origpl is not None and self._origpl != self._pl:
928 if self._origpl is not None and self._origpl != self._pl:
936 for c, callback in sorted(
929 for c, callback in sorted(
937 pycompat.iteritems(self._plchangecallbacks)
930 pycompat.iteritems(self._plchangecallbacks)
938 ):
931 ):
939 callback(self, self._origpl, self._pl)
932 callback(self, self._origpl, self._pl)
940 self._origpl = None
933 self._origpl = None
941 # use the modification time of the newly created temporary file as the
934 # use the modification time of the newly created temporary file as the
942 # filesystem's notion of 'now'
935 # filesystem's notion of 'now'
943 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
936 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
944
937
945 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
938 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
946 # timestamp of each entries in dirstate, because of 'now > mtime'
939 # timestamp of each entries in dirstate, because of 'now > mtime'
947 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
940 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
948 if delaywrite > 0:
941 if delaywrite > 0:
949 # do we have any files to delay for?
942 # do we have any files to delay for?
950 for f, e in pycompat.iteritems(self._map):
943 for f, e in pycompat.iteritems(self._map):
951 if e.need_delay(now):
944 if e.need_delay(now):
952 import time # to avoid useless import
945 import time # to avoid useless import
953
946
954 # rather than sleep n seconds, sleep until the next
947 # rather than sleep n seconds, sleep until the next
955 # multiple of n seconds
948 # multiple of n seconds
956 clock = time.time()
949 clock = time.time()
957 start = int(clock) - (int(clock) % delaywrite)
950 start = int(clock) - (int(clock) % delaywrite)
958 end = start + delaywrite
951 end = start + delaywrite
959 time.sleep(end - clock)
952 time.sleep(end - clock)
960 now = end # trust our estimate that the end is near now
953 now = end # trust our estimate that the end is near now
961 break
954 break
962
955
963 self._map.write(tr, st, now)
956 self._map.write(tr, st, now)
964 self._lastnormaltime = 0
957 self._lastnormaltime = 0
965 self._dirty = False
958 self._dirty = False
966
959
967 def _dirignore(self, f):
960 def _dirignore(self, f):
968 if self._ignore(f):
961 if self._ignore(f):
969 return True
962 return True
970 for p in pathutil.finddirs(f):
963 for p in pathutil.finddirs(f):
971 if self._ignore(p):
964 if self._ignore(p):
972 return True
965 return True
973 return False
966 return False
974
967
975 def _ignorefiles(self):
968 def _ignorefiles(self):
976 files = []
969 files = []
977 if os.path.exists(self._join(b'.hgignore')):
970 if os.path.exists(self._join(b'.hgignore')):
978 files.append(self._join(b'.hgignore'))
971 files.append(self._join(b'.hgignore'))
979 for name, path in self._ui.configitems(b"ui"):
972 for name, path in self._ui.configitems(b"ui"):
980 if name == b'ignore' or name.startswith(b'ignore.'):
973 if name == b'ignore' or name.startswith(b'ignore.'):
981 # we need to use os.path.join here rather than self._join
974 # we need to use os.path.join here rather than self._join
982 # because path is arbitrary and user-specified
975 # because path is arbitrary and user-specified
983 files.append(os.path.join(self._rootdir, util.expandpath(path)))
976 files.append(os.path.join(self._rootdir, util.expandpath(path)))
984 return files
977 return files

    def _ignorefileandline(self, f):
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")
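        # a hit is reported as (file, lineno, line), e.g. (illustrative
        # values) (b'/repo/.hgignore', 3, b'*.orig\n'); (None, -1, b"")
        # means nothing ignored `f`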

    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
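        # e.g. (illustrative) for explicit paths [b'foo', b'bar'] where foo
        # is a file and bar a directory on disk, results maps b'foo' to its
        # stat while dirsfound contains the tuple (b'bar', b'bar')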

    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None
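        # rough summary of the three steps below: an exact matcher only needs
        # step 1; a prefix matcher can usually skip step 3; pattern matchers
        # also run step 3 to catch dirstate-only files that the directory
        # walk never reached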

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that weren't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
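    # walk() is the engine behind status(); a typical (illustrative) call is
    #   self.walk(match, subrepos, listunknown, listignored, full=False)
    # returning e.g. {b'foo': <stat>, b'gone': None}, where None marks files
    # known to the dirstate but absent or unreadable on disk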

    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
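        # e.g. setting worker.numcpus=4 in hgrc exports RAYON_NUM_THREADS=4,
        # capping the Rust status threads at 4, while worker.enabled=no
        # forces a single thread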
1342
1335
1343 (
1336 (
1344 lookup,
1337 lookup,
1345 modified,
1338 modified,
1346 added,
1339 added,
1347 removed,
1340 removed,
1348 deleted,
1341 deleted,
1349 clean,
1342 clean,
1350 ignored,
1343 ignored,
1351 unknown,
1344 unknown,
1352 warnings,
1345 warnings,
1353 bad,
1346 bad,
1354 traversed,
1347 traversed,
1355 dirty,
1348 dirty,
1356 ) = rustmod.status(
1349 ) = rustmod.status(
1357 self._map._rustmap,
1350 self._map._rustmap,
1358 matcher,
1351 matcher,
1359 self._rootdir,
1352 self._rootdir,
1360 self._ignorefiles(),
1353 self._ignorefiles(),
1361 self._checkexec,
1354 self._checkexec,
1362 self._lastnormaltime,
1355 self._lastnormaltime,
1363 bool(list_clean),
1356 bool(list_clean),
1364 bool(list_ignored),
1357 bool(list_ignored),
1365 bool(list_unknown),
1358 bool(list_unknown),
1366 bool(matcher.traversedir),
1359 bool(matcher.traversedir),
1367 )
1360 )
1368
1361
1369 self._dirty |= dirty
1362 self._dirty |= dirty
1370
1363
1371 if matcher.traversedir:
1364 if matcher.traversedir:
1372 for dir in traversed:
1365 for dir in traversed:
1373 matcher.traversedir(dir)
1366 matcher.traversedir(dir)
1374
1367
1375 if self._ui.warn:
1368 if self._ui.warn:
1376 for item in warnings:
1369 for item in warnings:
1377 if isinstance(item, tuple):
1370 if isinstance(item, tuple):
1378 file_path, syntax = item
1371 file_path, syntax = item
1379 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1372 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1380 file_path,
1373 file_path,
1381 syntax,
1374 syntax,
1382 )
1375 )
1383 self._ui.warn(msg)
1376 self._ui.warn(msg)
1384 else:
1377 else:
1385 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1378 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1386 self._ui.warn(
1379 self._ui.warn(
1387 msg
1380 msg
1388 % (
1381 % (
1389 pathutil.canonpath(
1382 pathutil.canonpath(
1390 self._rootdir, self._rootdir, item
1383 self._rootdir, self._rootdir, item
1391 ),
1384 ),
1392 b"No such file or directory",
1385 b"No such file or directory",
1393 )
1386 )
1394 )
1387 )
1395
1388
1396 for (fn, message) in bad:
1389 for (fn, message) in bad:
1397 matcher.bad(fn, encoding.strtolocal(message))
1390 matcher.bad(fn, encoding.strtolocal(message))
1398
1391
1399 status = scmutil.status(
1392 status = scmutil.status(
1400 modified=modified,
1393 modified=modified,
1401 added=added,
1394 added=added,
1402 removed=removed,
1395 removed=removed,
1403 deleted=deleted,
1396 deleted=deleted,
1404 unknown=unknown,
1397 unknown=unknown,
1405 ignored=ignored,
1398 ignored=ignored,
1406 clean=clean,
1399 clean=clean,
1407 )
1400 )
1408 return (lookup, status)
1401 return (lookup, status)

    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                pass

        def noop(f):
            pass

        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
            mode = t.mode
            size = t.size
            time = t.mtime

            if not st and t.tracked:
                dadd(fn)
            elif t.merged:
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or t.from_p2
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)
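    # note on the cascade above: a tracked file whose recorded size and mode
    # match but whose mtime differs lands in `lookup` ("unsure"); callers are
    # expected to re-read its contents to decide whether it really changed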

    def matches(self, match):
        """
        return files in the dirstate (in whatever state) filtered by match
        """
        dmap = self._map
        if rustmod is not None:
            dmap = self._map._rustmap

        if match.always():
            return dmap.keys()
        files = match.files()
        if match.isexact():
            # fast path -- filter the other way around, since typically files is
            # much smaller than dmap
            return [f for f in files if f in dmap]
        if match.prefix() and all(fn in dmap for fn in files):
            # fast path -- all the values are known to be files, so just return
            # that
            return list(files)
        return [f for f in dmap if match(f)]
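    # complexity note: the exact-matcher fast path is O(len(files)) dict
    # lookups, while the final fallback scans every dirstate entry, e.g.
    # (illustrative) every tracked file for a pattern like 'glob:**.py'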

    def _actualfilename(self, tr):
        if tr:
            return self._pendingfilename
        else:
            return self._filename

    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file'''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to make sure changes are
        # actually written out, because the latter omits writing out if a
        # transaction is running. The output file is then used to create
        # the backup of the dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                tr,
                self._opener(filename, b"w", atomictemp=True, checkambig=True),
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )
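    # design note: the hardlink shares an inode with the current dirstate
    # file; that is safe because later writes go through atomictemp (write
    # to a temporary file, then rename), which swaps in a new inode and
    # leaves the backup's content untouched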

    def restorebackup(self, tr, backupname):
        '''Restore dirstate from backup file'''
        # this "invalidate()" prevents "wlock.release()" from writing
        # changes of dirstate out after restoring from backup file
        self.invalidate()
        filename = self._actualfilename(tr)
        o = self._opener
        if util.samefile(o.join(backupname), o.join(filename)):
            o.unlink(backupname)
        else:
            o.rename(backupname, filename, checkambig=True)

    def clearbackup(self, tr, backupname):
        '''Clear backup file'''
        self._opener.unlink(backupname)
@@ -1,857 +1,862 @@
from __future__ import absolute_import

import collections
import errno
import shutil
import struct

from .i18n import _
from .node import (
    bin,
    hex,
    nullrev,
)
from . import (
    error,
    filemerge,
    pycompat,
    util,
)
from .utils import hashutil

_pack = struct.pack
_unpack = struct.unpack


def _droponode(data):
    # used for compatibility for v1
    bits = data.split(b'\0')
    bits = bits[:-2] + bits[-1:]
    return b'\0'.join(bits)


def _filectxorabsent(hexnode, ctx, f):
    if hexnode == ctx.repo().nodeconstants.nullhex:
        return filemerge.absentfilectx(ctx, f)
    else:
        return ctx[f]


# Merge state record types. See ``mergestate`` docs for more.

####
# merge records which record metadata about a current merge;
# each exists only once in a mergestate
#####
RECORD_LOCAL = b'L'
RECORD_OTHER = b'O'
# record merge labels
RECORD_LABELS = b'l'

#####
# records extra information about files, with one entry containing info about
# one file. Hence, multiple of them can exist
#####
RECORD_FILE_VALUES = b'f'

#####
# merge records which represent the state of individual merges of files/folders
# These are top level records for each entry containing merge related info.
# Each record of these has info about one file. Hence multiple of them can
# exist
#####
RECORD_MERGED = b'F'
RECORD_CHANGEDELETE_CONFLICT = b'C'
# the path was dir on one side of merge and file on another
RECORD_PATH_CONFLICT = b'P'

#####
# possible states which a merge entry can have. These are stored inside
# top-level merge records mentioned just above.
#####
MERGE_RECORD_UNRESOLVED = b'u'
MERGE_RECORD_RESOLVED = b'r'
MERGE_RECORD_UNRESOLVED_PATH = b'pu'
MERGE_RECORD_RESOLVED_PATH = b'pr'
# represents that the file was automatically merged in favor
# of the other version. This info is used on commit.
# This is now deprecated and commit related information is now
# stored in RECORD_FILE_VALUES
MERGE_RECORD_MERGED_OTHER = b'o'

#####
# top level record which stores other unknown records. Multiple of these can
# exist
#####
RECORD_OVERRIDE = b't'

#####
# legacy records which are no longer used but kept to prevent breaking BC
#####
# This record was released in 5.4 and usage was removed in 5.5
LEGACY_RECORD_RESOLVED_OTHER = b'R'
# This record was released in 3.7 and usage was removed in 5.6
LEGACY_RECORD_DRIVER_RESOLVED = b'd'
# This record was released in 3.7 and usage was removed in 5.6
LEGACY_MERGE_DRIVER_STATE = b'm'
# This record was released in 3.7 and usage was removed in 5.6
LEGACY_MERGE_DRIVER_MERGE = b'D'


ACTION_FORGET = b'f'
ACTION_REMOVE = b'r'
ACTION_ADD = b'a'
ACTION_GET = b'g'
ACTION_PATH_CONFLICT = b'p'
ACTION_PATH_CONFLICT_RESOLVE = b'pr'
ACTION_ADD_MODIFIED = b'am'
ACTION_CREATED = b'c'
ACTION_DELETED_CHANGED = b'dc'
ACTION_CHANGED_DELETED = b'cd'
ACTION_MERGE = b'm'
ACTION_LOCAL_DIR_RENAME_GET = b'dg'
ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
ACTION_KEEP = b'k'
# the file was absent on local side before merge and we should
# keep it absent (absent means file not present, it can be a result
# of file deletion, rename etc.)
ACTION_KEEP_ABSENT = b'ka'
# the file is absent on the ancestor and remote side of the merge
# hence this file is new and we should keep it
ACTION_KEEP_NEW = b'kn'
ACTION_EXEC = b'e'
ACTION_CREATED_MERGE = b'cm'

# actions which are no-ops
NO_OP_ACTIONS = (
    ACTION_KEEP,
    ACTION_KEEP_ABSENT,
    ACTION_KEEP_NEW,
)


class _mergestate_base(object):
    """track 3-way merge state of individual files

    The merge state is stored on disk when needed. Two files are used: one with
    an old format (version 1), and one with a new format (version 2). Version 2
    stores a superset of the data in version 1, including new kinds of records
    in the future. For more about the new format, see the documentation for
    `_readrecordsv2`.

    Each record can contain arbitrary content, and has an associated type. This
    `type` should be a letter. If `type` is uppercase, the record is mandatory:
    versions of Mercurial that don't support it should abort. If `type` is
    lowercase, the record can be safely ignored.

    Currently known records:

    L: the node of the "local" part of the merge (hexified version)
    O: the node of the "other" part of the merge (hexified version)
    F: a "file to be merged" entry
    C: a change/delete or delete/change conflict
    P: a path conflict (file vs directory)
    f: a (filename, dictionary) tuple of optional values for a given file
    l: the labels for the parts of the merge.

    Merge record states (stored in self._state, indexed by filename):
    u: unresolved conflict
    r: resolved conflict
    pu: unresolved path conflict (file conflicts with directory)
    pr: resolved path conflict
    o: file was merged in favor of other parent of merge (DEPRECATED)

    The resolve command transitions between 'u' and 'r' for conflicts and
    'pu' and 'pr' for path conflicts.
    """

    def __init__(self, repo):
        """Initialize the merge state.

        Do not use this directly! Instead call read() or clean()."""
        self._repo = repo
        self._state = {}
        self._stateextras = collections.defaultdict(dict)
        self._local = None
        self._other = None
        self._labels = None
        # contains a mapping of form:
        #   {filename: (merge_return_value, action_to_be_performed)}
        # these are the results of re-running the merge process
        # this dict is used to perform actions on dirstate caused by
        # re-running the merge
        self._results = {}
        self._dirty = False

    def reset(self):
        pass

    def start(self, node, other, labels=None):
        self._local = node
        self._other = other
        self._labels = labels

    @util.propertycache
    def local(self):
        if self._local is None:
            msg = b"local accessed but self._local isn't set"
            raise error.ProgrammingError(msg)
        return self._local

    @util.propertycache
    def localctx(self):
        return self._repo[self.local]

    @util.propertycache
    def other(self):
        if self._other is None:
            msg = b"other accessed but self._other isn't set"
            raise error.ProgrammingError(msg)
        return self._other

    @util.propertycache
    def otherctx(self):
        return self._repo[self.other]

    def active(self):
        """Whether mergestate is active.

        Returns True if there appears to be mergestate. This is a rough proxy
        for "is a merge in progress."
        """
        return bool(self._local) or bool(self._state)

    def commit(self):
        """Write current state on disk (if necessary)"""

    @staticmethod
    def getlocalkey(path):
        """hash the path of a local file context for storage in the .hg/merge
        directory."""

        return hex(hashutil.sha1(path).digest())
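    # e.g. the local version of a conflicting file b'dir/file' is backed up
    # under .hg/merge/<40-char hex sha1 of b'dir/file'>, keyed by path
    # rather than by content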

    def _make_backup(self, fctx, localkey):
        raise NotImplementedError()

    def _restore_backup(self, fctx, localkey, flags):
        raise NotImplementedError()

    def add(self, fcl, fco, fca, fd):
        """add a new (potentially?) conflicting file to the merge state
        fcl: file context for local,
        fco: file context for remote,
        fca: file context for ancestors,
        fd: file path of the resulting merge.

        note: also write the local version to the `.hg/merge` directory.
        """
        if fcl.isabsent():
            localkey = self._repo.nodeconstants.nullhex
        else:
            localkey = mergestate.getlocalkey(fcl.path())
            self._make_backup(fcl, localkey)
        self._state[fd] = [
            MERGE_RECORD_UNRESOLVED,
            localkey,
            fcl.path(),
            fca.path(),
            hex(fca.filenode()),
            fco.path(),
            hex(fco.filenode()),
            fcl.flags(),
        ]
        self._stateextras[fd][b'ancestorlinknode'] = hex(fca.node())
        self._dirty = True
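    # e.g. (illustrative) after add(), _state[b'foo'] holds
    #   [b'u', <sha1 key>, b'foo', b'foo', <ancestor filenode hex>,
    #    b'foo', <other filenode hex>, b'']
    # i.e. an unresolved entry recording both sides and the ancestor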

    def addpathconflict(self, path, frename, forigin):
        """add a new conflicting path to the merge state
        path: the path that conflicts
        frename: the filename the conflicting file was renamed to
        forigin: origin of the file ('l' or 'r' for local/remote)
        """
        self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
        self._dirty = True

    def addcommitinfo(self, path, data):
        """stores information which is required at commit
        into _stateextras"""
        self._stateextras[path].update(data)
        self._dirty = True

    def __contains__(self, dfile):
        return dfile in self._state

    def __getitem__(self, dfile):
        return self._state[dfile][0]

    def __iter__(self):
        return iter(sorted(self._state))

    def files(self):
        return self._state.keys()

    def mark(self, dfile, state):
        self._state[dfile][0] = state
        self._dirty = True

    def unresolved(self):
        """Obtain the paths of unresolved files."""

        for f, entry in pycompat.iteritems(self._state):
            if entry[0] in (
                MERGE_RECORD_UNRESOLVED,
                MERGE_RECORD_UNRESOLVED_PATH,
            ):
                yield f

    def allextras(self):
        """return all extras information stored with the mergestate"""
        return self._stateextras

    def extras(self, filename):
        """return extras stored with the mergestate for the given filename"""
        return self._stateextras[filename]
315
315
316 def _resolve(self, preresolve, dfile, wctx):
316 def _resolve(self, preresolve, dfile, wctx):
317 """rerun merge process for file path `dfile`.
317 """rerun merge process for file path `dfile`.
318 Returns whether the merge was completed and the return value of merge
318 Returns whether the merge was completed and the return value of merge
319 obtained from filemerge._filemerge().
319 obtained from filemerge._filemerge().
320 """
320 """
321 if self[dfile] in (
321 if self[dfile] in (
322 MERGE_RECORD_RESOLVED,
322 MERGE_RECORD_RESOLVED,
323 LEGACY_RECORD_DRIVER_RESOLVED,
323 LEGACY_RECORD_DRIVER_RESOLVED,
324 ):
324 ):
325 return True, 0
325 return True, 0
326 stateentry = self._state[dfile]
326 stateentry = self._state[dfile]
327 state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
327 state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
328 octx = self._repo[self._other]
328 octx = self._repo[self._other]
329 extras = self.extras(dfile)
329 extras = self.extras(dfile)
330 anccommitnode = extras.get(b'ancestorlinknode')
330 anccommitnode = extras.get(b'ancestorlinknode')
331 if anccommitnode:
331 if anccommitnode:
332 actx = self._repo[anccommitnode]
332 actx = self._repo[anccommitnode]
333 else:
333 else:
334 actx = None
334 actx = None
335 fcd = _filectxorabsent(localkey, wctx, dfile)
335 fcd = _filectxorabsent(localkey, wctx, dfile)
336 fco = _filectxorabsent(onode, octx, ofile)
336 fco = _filectxorabsent(onode, octx, ofile)
337 # TODO: move this to filectxorabsent
337 # TODO: move this to filectxorabsent
338 fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
338 fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
339 # "premerge" x flags
339 # "premerge" x flags
340 flo = fco.flags()
340 flo = fco.flags()
341 fla = fca.flags()
341 fla = fca.flags()
342 if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
342 if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
343 if fca.rev() == nullrev and flags != flo:
343 if fca.rev() == nullrev and flags != flo:
344 if preresolve:
344 if preresolve:
345 self._repo.ui.warn(
345 self._repo.ui.warn(
346 _(
346 _(
347 b'warning: cannot merge flags for %s '
347 b'warning: cannot merge flags for %s '
348 b'without common ancestor - keeping local flags\n'
348 b'without common ancestor - keeping local flags\n'
349 )
349 )
350 % afile
350 % afile
351 )
351 )
352 elif flags == fla:
352 elif flags == fla:
353 flags = flo
353 flags = flo
354 if preresolve:
354 if preresolve:
355 # restore local
355 # restore local
356 if localkey != self._repo.nodeconstants.nullhex:
356 if localkey != self._repo.nodeconstants.nullhex:
357 self._restore_backup(wctx[dfile], localkey, flags)
357 self._restore_backup(wctx[dfile], localkey, flags)
358 else:
358 else:
359 wctx[dfile].remove(ignoremissing=True)
359 wctx[dfile].remove(ignoremissing=True)
360 complete, merge_ret, deleted = filemerge.premerge(
360 complete, merge_ret, deleted = filemerge.premerge(
361 self._repo,
361 self._repo,
362 wctx,
362 wctx,
363 self._local,
363 self._local,
364 lfile,
364 lfile,
365 fcd,
365 fcd,
366 fco,
366 fco,
367 fca,
367 fca,
368 labels=self._labels,
368 labels=self._labels,
369 )
369 )
370 else:
370 else:
371 complete, merge_ret, deleted = filemerge.filemerge(
371 complete, merge_ret, deleted = filemerge.filemerge(
372 self._repo,
372 self._repo,
373 wctx,
373 wctx,
374 self._local,
374 self._local,
375 lfile,
375 lfile,
376 fcd,
376 fcd,
377 fco,
377 fco,
378 fca,
378 fca,
379 labels=self._labels,
379 labels=self._labels,
380 )
380 )
381 if merge_ret is None:
381 if merge_ret is None:
382 # If return value of merge is None, then there are no real conflict
382 # If return value of merge is None, then there are no real conflict
383 del self._state[dfile]
383 del self._state[dfile]
384 self._dirty = True
384 self._dirty = True
385 elif not merge_ret:
385 elif not merge_ret:
386 self.mark(dfile, MERGE_RECORD_RESOLVED)
386 self.mark(dfile, MERGE_RECORD_RESOLVED)
387
387
388 if complete:
388 if complete:
389 action = None
389 action = None
390 if deleted:
390 if deleted:
391 if fcd.isabsent():
391 if fcd.isabsent():
392 # dc: local picked. Need to drop if present, which may
392 # dc: local picked. Need to drop if present, which may
393 # happen on re-resolves.
393 # happen on re-resolves.
394 action = ACTION_FORGET
394 action = ACTION_FORGET
395 else:
395 else:
396 # cd: remote picked (or otherwise deleted)
396 # cd: remote picked (or otherwise deleted)
397 action = ACTION_REMOVE
397 action = ACTION_REMOVE
398 else:
398 else:
399 if fcd.isabsent(): # dc: remote picked
399 if fcd.isabsent(): # dc: remote picked
400 action = ACTION_GET
400 action = ACTION_GET
401 elif fco.isabsent(): # cd: local picked
401 elif fco.isabsent(): # cd: local picked
402 if dfile in self.localctx:
402 if dfile in self.localctx:
403 action = ACTION_ADD_MODIFIED
403 action = ACTION_ADD_MODIFIED
404 else:
404 else:
405 action = ACTION_ADD
405 action = ACTION_ADD
406 # else: regular merges (no action necessary)
406 # else: regular merges (no action necessary)
407 self._results[dfile] = merge_ret, action
407 self._results[dfile] = merge_ret, action
408
408
409 return complete, merge_ret
409 return complete, merge_ret
410
410
411 def preresolve(self, dfile, wctx):
411 def preresolve(self, dfile, wctx):
412 """run premerge process for dfile
412 """run premerge process for dfile
413
413
414 Returns whether the merge is complete, and the exit code."""
414 Returns whether the merge is complete, and the exit code."""
415 return self._resolve(True, dfile, wctx)
415 return self._resolve(True, dfile, wctx)
416
416
417 def resolve(self, dfile, wctx):
417 def resolve(self, dfile, wctx):
418 """run merge process (assuming premerge was run) for dfile
418 """run merge process (assuming premerge was run) for dfile
419
419
420 Returns the exit code of the merge."""
420 Returns the exit code of the merge."""
421 return self._resolve(False, dfile, wctx)[1]
421 return self._resolve(False, dfile, wctx)[1]
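
    # Driving-loop sketch (hypothetical caller, using only methods defined in
    # this class) showing how the two phases above fit together:
    #
    #   for f in ms.unresolved():
    #       complete, ret = ms.preresolve(f, wctx)
    #       if not complete:
    #           ret = ms.resolve(f, wctx)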

    def counts(self):
        """return counts for updated, merged and removed files in this
        session"""
        updated, merged, removed = 0, 0, 0
        for r, action in pycompat.itervalues(self._results):
            if r is None:
                updated += 1
            elif r == 0:
                if action == ACTION_REMOVE:
                    removed += 1
                else:
                    merged += 1
        return updated, merged, removed

    def unresolvedcount(self):
        """get unresolved count for this merge (persistent)"""
        return len(list(self.unresolved()))

    def actions(self):
        """return lists of actions to perform on the dirstate"""
        actions = {
            ACTION_REMOVE: [],
            ACTION_FORGET: [],
            ACTION_ADD: [],
            ACTION_ADD_MODIFIED: [],
            ACTION_GET: [],
        }
        for f, (r, action) in pycompat.iteritems(self._results):
            if action is not None:
                actions[action].append((f, None, b"merge result"))
        return actions
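
    # Shape of the mapping returned above (illustrative values): each entry is
    # a list of (filename, args, message) triples to replay onto the dirstate,
    # e.g.
    #
    #   {ACTION_GET: [(b'foo.txt', None, b"merge result")], ACTION_ADD: [], ...}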


class mergestate(_mergestate_base):

    statepathv1 = b'merge/state'
    statepathv2 = b'merge/state2'

    @staticmethod
    def clean(repo):
        """Initialize a brand new merge state, removing any existing state on
        disk."""
        ms = mergestate(repo)
        ms.reset()
        return ms

    @staticmethod
    def read(repo):
        """Initialize the merge state, reading it from disk."""
        ms = mergestate(repo)
        ms._read()
        return ms
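
    # Typical entry points (sketch, assuming `repo` is a local repository
    # object): `mergestate.read(repo)` loads whatever is on disk, while
    # `mergestate.clean(repo)` discards it before starting a new merge.
    #
    #   ms = mergestate.read(repo)
    #   if ms.unresolvedcount():
    #       ...  # a merge is still in progress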

    def _read(self):
        """Analyse each record content to restore a serialized state from disk

        This function processes the "record" entries produced by the
        de-serialization of the on-disk file.
        """
        unsupported = set()
        records = self._readrecords()
        for rtype, record in records:
            if rtype == RECORD_LOCAL:
                self._local = bin(record)
            elif rtype == RECORD_OTHER:
                self._other = bin(record)
            elif rtype == LEGACY_MERGE_DRIVER_STATE:
                pass
            elif rtype in (
                RECORD_MERGED,
                RECORD_CHANGEDELETE_CONFLICT,
                RECORD_PATH_CONFLICT,
                LEGACY_MERGE_DRIVER_MERGE,
                LEGACY_RECORD_RESOLVED_OTHER,
            ):
                bits = record.split(b'\0')
                # merge entry type MERGE_RECORD_MERGED_OTHER is deprecated
                # and we now store related information in _stateextras, so
                # let's write to _stateextras directly
                if bits[1] == MERGE_RECORD_MERGED_OTHER:
                    self._stateextras[bits[0]][b'filenode-source'] = b'other'
                else:
                    self._state[bits[0]] = bits[1:]
            elif rtype == RECORD_FILE_VALUES:
                filename, rawextras = record.split(b'\0', 1)
                extraparts = rawextras.split(b'\0')
                extras = {}
                i = 0
                while i < len(extraparts):
                    extras[extraparts[i]] = extraparts[i + 1]
                    i += 2

                self._stateextras[filename] = extras
            elif rtype == RECORD_LABELS:
                labels = record.split(b'\0', 2)
                self._labels = [l for l in labels if len(l) > 0]
            elif not rtype.islower():
                unsupported.add(rtype)

        if unsupported:
            raise error.UnsupportedMergeRecords(unsupported)

    def _readrecords(self):
        """Read merge state from disk and return a list of records (TYPE, data)

        We read data from both the v1 and v2 files and decide which one to
        use.

        V1 was used by versions prior to 2.9.1 and contains less data than
        v2. We read both versions and check whether any data in v2
        contradicts v1. If there is no contradiction we can safely assume
        that both v1 and v2 were written at the same time and use the extra
        data in v2. If there is a contradiction we ignore the v2 content, as
        we assume an old version of Mercurial has overwritten the mergestate
        file and left an old v2 file around.

        returns a list of records [(TYPE, data), ...]"""
        v1records = self._readrecordsv1()
        v2records = self._readrecordsv2()
        if self._v1v2match(v1records, v2records):
            return v2records
        else:
            # v1 file is newer than v2 file, use it
            # we have to infer the "other" changeset of the merge
            # we cannot do better than that with v1 of the format
            mctx = self._repo[None].parents()[-1]
            v1records.append((RECORD_OTHER, mctx.hex()))
            # add a placeholder "other" file node entry
            # nobody is using it yet so we do not need to fetch the data
            # if mctx was wrong `mctx[bits[-2]]` may fail.
            for idx, r in enumerate(v1records):
                if r[0] == RECORD_MERGED:
                    bits = r[1].split(b'\0')
                    bits.insert(-2, b'')
                    v1records[idx] = (r[0], b'\0'.join(bits))
            return v1records
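
    # `list.insert(-2, ...)` splices the placeholder in just before the last
    # two fields of the record. Plain-Python illustration:
    #
    #   bits = [b'a', b'b', b'c', b'd']
    #   bits.insert(-2, b'')   # -> [b'a', b'b', b'', b'c', b'd']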

    def _v1v2match(self, v1records, v2records):
        oldv2 = set()  # old format version of v2 record
        for rec in v2records:
            if rec[0] == RECORD_LOCAL:
                oldv2.add(rec)
            elif rec[0] == RECORD_MERGED:
                # drop the onode data (not contained in v1)
                oldv2.add((RECORD_MERGED, _droponode(rec[1])))
        for rec in v1records:
            if rec not in oldv2:
                return False
        else:
            return True

    def _readrecordsv1(self):
        """read the on-disk merge state for the version 1 file

        returns a list of records [(TYPE, data), ...]

        Note: the "F" data from this file are one entry short
        (no "other file node" entry)
        """
        records = []
        try:
            f = self._repo.vfs(self.statepathv1)
            for i, l in enumerate(f):
                if i == 0:
                    records.append((RECORD_LOCAL, l[:-1]))
                else:
                    records.append((RECORD_MERGED, l[:-1]))
            f.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return records

    def _readrecordsv2(self):
        """read the on-disk merge state for the version 2 file

        This format is a list of arbitrary records of the form:

          [type][length][content]

        `type` is a single character, `length` is a 4 byte integer, and
        `content` is an arbitrary byte sequence of length `length`.

        Mercurial versions prior to 3.7 have a bug where if there are
        unsupported mandatory merge records, attempting to clear out the merge
        state with hg update --clean or similar aborts. The 't' record type
        works around that by writing out what those versions treat as an
        advisory record, but later versions interpret as special: the first
        character is the 'real' record type and everything onwards is the data.

        Returns a list of records [(TYPE, data), ...]."""
        records = []
        try:
            f = self._repo.vfs(self.statepathv2)
            data = f.read()
            off = 0
            end = len(data)
            while off < end:
                rtype = data[off : off + 1]
                off += 1
                length = _unpack(b'>I', data[off : (off + 4)])[0]
                off += 4
                record = data[off : (off + length)]
                off += length
                if rtype == RECORD_OVERRIDE:
                    rtype, record = record[0:1], record[1:]
                records.append((rtype, record))
            f.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return records
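
    # Decoding sketch (hypothetical helper, stdlib only) for the
    # [type][length][content] framing described above:
    #
    #   import struct
    #
    #   def _iter_records(data):
    #       off, end = 0, len(data)
    #       while off < end:
    #           rtype = data[off : off + 1]
    #           (length,) = struct.unpack(b'>I', data[off + 1 : off + 5])
    #           yield rtype, data[off + 5 : off + 5 + length]
    #           off += 5 + length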

    def commit(self):
        if self._dirty:
            records = self._makerecords()
            self._writerecords(records)
            self._dirty = False

    def _makerecords(self):
        records = []
        records.append((RECORD_LOCAL, hex(self._local)))
        records.append((RECORD_OTHER, hex(self._other)))
        # Write out state items. In all cases, the value of the state map entry
        # is written as the contents of the record. The record type depends on
        # the type of state that is stored, and capital-letter records are used
        # to prevent older versions of Mercurial that do not support the feature
        # from loading them.
        for filename, v in pycompat.iteritems(self._state):
            if v[0] in (
                MERGE_RECORD_UNRESOLVED_PATH,
                MERGE_RECORD_RESOLVED_PATH,
            ):
                # Path conflicts. These are stored in 'P' records. The current
                # resolution state ('pu' or 'pr') is stored within the record.
                records.append(
                    (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
                )
            elif (
                v[1] == self._repo.nodeconstants.nullhex
                or v[6] == self._repo.nodeconstants.nullhex
            ):
                # Change/Delete or Delete/Change conflicts. These are stored in
                # 'C' records. v[1] is the local file, and is nullhex when the
                # file is deleted locally ('dc'). v[6] is the remote file, and
                # is nullhex when the file is deleted remotely ('cd').
                records.append(
                    (RECORD_CHANGEDELETE_CONFLICT, b'\0'.join([filename] + v))
                )
            else:
                # Normal files. These are stored in 'F' records.
                records.append((RECORD_MERGED, b'\0'.join([filename] + v)))
        for filename, extras in sorted(pycompat.iteritems(self._stateextras)):
            rawextras = b'\0'.join(
                b'%s\0%s' % (k, v) for k, v in pycompat.iteritems(extras)
            )
            records.append(
                (RECORD_FILE_VALUES, b'%s\0%s' % (filename, rawextras))
            )
        if self._labels is not None:
            labels = b'\0'.join(self._labels)
            records.append((RECORD_LABELS, labels))
        return records
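
    # Illustrative record shape (field order taken from the code above;
    # concrete values are made up): an unresolved path conflict, whose state
    # is 'pu' per the comment above, would serialize as
    #
    #   (RECORD_PATH_CONFLICT, b'dir/file\0pu\0dir/file~renamed\0l')
    #
    # while a regular unresolved merge becomes an 'F' record whose data is
    # the filename followed by the eight '\0'-separated state fields.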

    def _writerecords(self, records):
        """Write current state on disk (both v1 and v2)"""
        self._writerecordsv1(records)
        self._writerecordsv2(records)

    def _writerecordsv1(self, records):
        """Write current state on disk in a version 1 file"""
        f = self._repo.vfs(self.statepathv1, b'wb')
        irecords = iter(records)
        lrecords = next(irecords)
        assert lrecords[0] == RECORD_LOCAL
        f.write(hex(self._local) + b'\n')
        for rtype, data in irecords:
            if rtype == RECORD_MERGED:
                f.write(b'%s\n' % _droponode(data))
        f.close()

    def _writerecordsv2(self, records):
        """Write current state on disk in a version 2 file

        See the docstring for _readrecordsv2 for why we use 't'."""
        # these are the records that all version 2 clients can read
        allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
        f = self._repo.vfs(self.statepathv2, b'wb')
        for key, data in records:
            assert len(key) == 1
            if key not in allowlist:
                key, data = RECORD_OVERRIDE, b'%s%s' % (key, data)
            format = b'>sI%is' % len(data)
            f.write(_pack(format, key, len(data), data))
        f.close()
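
    # Framing sketch: with key b'F' and a 4-byte payload, the pack format
    # becomes b'>sI4s', i.e. one type byte, a big-endian 32-bit length, then
    # the payload. Stdlib equivalent (illustrative):
    #
    #   import struct
    #   frame = struct.pack(b'>sI4s', b'F', 4, b'data')
    #   assert frame == b'F\x00\x00\x00\x04data'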

    def _make_backup(self, fctx, localkey):
        self._repo.vfs.write(b'merge/' + localkey, fctx.data())

    def _restore_backup(self, fctx, localkey, flags):
        with self._repo.vfs(b'merge/' + localkey) as f:
            fctx.write(f.read(), flags)

    def reset(self):
        shutil.rmtree(self._repo.vfs.join(b'merge'), True)


class memmergestate(_mergestate_base):
    def __init__(self, repo):
        super(memmergestate, self).__init__(repo)
        self._backups = {}

    def _make_backup(self, fctx, localkey):
        self._backups[localkey] = fctx.data()

    def _restore_backup(self, fctx, localkey, flags):
        fctx.write(self._backups[localkey], flags)


def recordupdates(repo, actions, branchmerge, getfiledata):
    """record merge actions to the dirstate"""
    # remove (must come first)
    for f, args, msg in actions.get(ACTION_REMOVE, []):
        if branchmerge:
            repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=False)
        else:
            repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=False)

    # forget (must come first)
    for f, args, msg in actions.get(ACTION_FORGET, []):
        repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=False)

    # resolve path conflicts
    for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
        (f0, origf0) = args
        repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
        repo.dirstate.copy(origf0, f)
        if f0 == origf0:
            repo.dirstate.update_file(f0, p1_tracked=True, wc_tracked=False)
        else:
            repo.dirstate.update_file(f0, p1_tracked=False, wc_tracked=False)

    # re-add
    for f, args, msg in actions.get(ACTION_ADD, []):
        repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)

    # re-add/mark as modified
    for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []):
        if branchmerge:
            repo.dirstate.normallookup(f)
        else:
            repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)

    # exec change
    for f, args, msg in actions.get(ACTION_EXEC, []):
        repo.dirstate.normallookup(f)

    # keep
    for f, args, msg in actions.get(ACTION_KEEP, []):
        pass

    # keep deleted
    for f, args, msg in actions.get(ACTION_KEEP_ABSENT, []):
        pass

    # keep new
    for f, args, msg in actions.get(ACTION_KEEP_NEW, []):
        pass

    # get
    for f, args, msg in actions.get(ACTION_GET, []):
        if branchmerge:
            # tracked in p1 can also be True, but update_file should not care
            repo.dirstate.update_file(
                f,
                p1_tracked=False,
                p2_tracked=True,
                wc_tracked=True,
                clean_p2=True,
            )
        else:
            parentfiledata = getfiledata[f] if getfiledata else None
-           repo.dirstate.update_parent_file_data(f, parentfiledata)
+           repo.dirstate.update_file(
+               f,
+               p1_tracked=True,
+               wc_tracked=True,
+               parentfiledata=parentfiledata,
+           )

    # merge
    for f, args, msg in actions.get(ACTION_MERGE, []):
        f1, f2, fa, move, anc = args
        if branchmerge:
            # We've done a branch merge, mark this file as merged
            # so that we properly record the merger later
            repo.dirstate.update_file(
                f, p1_tracked=True, wc_tracked=True, merged=True
            )
            if f1 != f2:  # copy/rename
                if move:
                    repo.dirstate.update_file(
                        f1, p1_tracked=True, wc_tracked=False
                    )
                if f1 != f:
                    repo.dirstate.copy(f1, f)
                else:
                    repo.dirstate.copy(f2, f)
        else:
            # We've update-merged a locally modified file, so
            # we set the dirstate to emulate a normal checkout
            # of that file some time in the past. Thus our
            # merge will appear as a normal local file
            # modification.
            if f2 == f:  # file not locally copied/moved
                repo.dirstate.update_file(
                    f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
                )
            if move:
                repo.dirstate.update_file(
                    f1, p1_tracked=False, wc_tracked=False
                )

    # directory rename, move local
    for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
            repo.dirstate.update_file(f0, p1_tracked=True, wc_tracked=False)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=True)
            repo.dirstate.update_file(f0, p1_tracked=False, wc_tracked=False)

    # directory rename, get
    for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=True)