##// END OF EJS Templates
dirstate: include explicit matches in match.traversedir calls...
Martin von Zweigbergk -
r44112:95d2eab0 default
parent child Browse files
Show More
@@ -1,1843 +1,1848 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import nullid
17 from .node import nullid
18 from .pycompat import delattr
18 from .pycompat import delattr
19
19
20 from hgdemandimport import tracing
20 from hgdemandimport import tracing
21
21
22 from . import (
22 from . import (
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 txnutil,
30 txnutil,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
# Accelerated implementations: the C parsers module and, when built, the
# optional Rust dirstate module (None when unavailable).
parsers = policy.importmod('parsers')
rustmod = policy.importrust('dirstate')

# Local aliases for frequently used caching decorators.
propertycache = util.propertycache
filecache = scmutil.filecache

# Sizes and mtimes are masked to 31 bits before being stored in the
# dirstate file format.
_rangemask = 0x7FFFFFFF

dirstatetuple = parsers.dirstatetuple
47
47
48
48
class repocache(filecache):
    """filecache variant for files living under .hg/."""

    def join(self, obj, fname):
        # Resolve fname through the repository's .hg/ opener.
        return obj._opener.join(fname)
54
54
55
55
class rootcache(filecache):
    """filecache variant for files living in the repository root."""

    def join(self, obj, fname):
        # Resolve fname relative to the working-directory root.
        return obj._join(fname)
61
61
62
62
63 def _getfsnow(vfs):
63 def _getfsnow(vfs):
64 '''Get "now" timestamp on filesystem'''
64 '''Get "now" timestamp on filesystem'''
65 tmpfd, tmpname = vfs.mkstemp()
65 tmpfd, tmpname = vfs.mkstemp()
66 try:
66 try:
67 return os.fstat(tmpfd)[stat.ST_MTIME]
67 return os.fstat(tmpfd)[stat.ST_MTIME]
68 finally:
68 finally:
69 os.close(tmpfd)
69 os.close(tmpfd)
70 vfs.unlink(tmpname)
70 vfs.unlink(tmpname)
71
71
72
72
@interfaceutil.implementer(intdirstate.idirstate)
class dirstate(object):
    def __init__(self, opener, ui, root, validate, sparsematchfn):
        '''Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        '''
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        self._dirty = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # Number of nested parentchange() contexts currently open.
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        self._origpl = None
        self._updatedfiles = set()
        self._mapcls = dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
104
104
105 @contextlib.contextmanager
105 @contextlib.contextmanager
106 def parentchange(self):
106 def parentchange(self):
107 '''Context manager for handling dirstate parents.
107 '''Context manager for handling dirstate parents.
108
108
109 If an exception occurs in the scope of the context manager,
109 If an exception occurs in the scope of the context manager,
110 the incoherent dirstate won't be written when wlock is
110 the incoherent dirstate won't be written when wlock is
111 released.
111 released.
112 '''
112 '''
113 self._parentwriters += 1
113 self._parentwriters += 1
114 yield
114 yield
115 # Typically we want the "undo" step of a context manager in a
115 # Typically we want the "undo" step of a context manager in a
116 # finally block so it happens even when an exception
116 # finally block so it happens even when an exception
117 # occurs. In this case, however, we only want to decrement
117 # occurs. In this case, however, we only want to decrement
118 # parentwriters if the code in the with statement exits
118 # parentwriters if the code in the with statement exits
119 # normally, so we don't have a try/finally here on purpose.
119 # normally, so we don't have a try/finally here on purpose.
120 self._parentwriters -= 1
120 self._parentwriters -= 1
121
121
122 def pendingparentchange(self):
122 def pendingparentchange(self):
123 '''Returns true if the dirstate is in the middle of a set of changes
123 '''Returns true if the dirstate is in the middle of a set of changes
124 that modify the dirstate parent.
124 that modify the dirstate parent.
125 '''
125 '''
126 return self._parentwriters > 0
126 return self._parentwriters > 0
127
127
128 @propertycache
128 @propertycache
129 def _map(self):
129 def _map(self):
130 """Return the dirstate contents (see documentation for dirstatemap)."""
130 """Return the dirstate contents (see documentation for dirstatemap)."""
131 self._map = self._mapcls(self._ui, self._opener, self._root)
131 self._map = self._mapcls(self._ui, self._opener, self._root)
132 return self._map
132 return self._map
133
133
134 @property
134 @property
135 def _sparsematcher(self):
135 def _sparsematcher(self):
136 """The matcher for the sparse checkout.
136 """The matcher for the sparse checkout.
137
137
138 The working directory may not include every file from a manifest. The
138 The working directory may not include every file from a manifest. The
139 matcher obtained by this property will match a path if it is to be
139 matcher obtained by this property will match a path if it is to be
140 included in the working directory.
140 included in the working directory.
141 """
141 """
142 # TODO there is potential to cache this property. For now, the matcher
142 # TODO there is potential to cache this property. For now, the matcher
143 # is resolved on every access. (But the called function does use a
143 # is resolved on every access. (But the called function does use a
144 # cache to keep the lookup fast.)
144 # cache to keep the lookup fast.)
145 return self._sparsematchfn()
145 return self._sparsematchfn()
146
146
147 @repocache(b'branch')
147 @repocache(b'branch')
148 def _branch(self):
148 def _branch(self):
149 try:
149 try:
150 return self._opener.read(b"branch").strip() or b"default"
150 return self._opener.read(b"branch").strip() or b"default"
151 except IOError as inst:
151 except IOError as inst:
152 if inst.errno != errno.ENOENT:
152 if inst.errno != errno.ENOENT:
153 raise
153 raise
154 return b"default"
154 return b"default"
155
155
156 @property
156 @property
157 def _pl(self):
157 def _pl(self):
158 return self._map.parents()
158 return self._map.parents()
159
159
160 def hasdir(self, d):
160 def hasdir(self, d):
161 return self._map.hastrackeddir(d)
161 return self._map.hastrackeddir(d)
162
162
163 @rootcache(b'.hgignore')
163 @rootcache(b'.hgignore')
164 def _ignore(self):
164 def _ignore(self):
165 files = self._ignorefiles()
165 files = self._ignorefiles()
166 if not files:
166 if not files:
167 return matchmod.never()
167 return matchmod.never()
168
168
169 pats = [b'include:%s' % f for f in files]
169 pats = [b'include:%s' % f for f in files]
170 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
170 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
171
171
172 @propertycache
172 @propertycache
173 def _slash(self):
173 def _slash(self):
174 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
174 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
175
175
176 @propertycache
176 @propertycache
177 def _checklink(self):
177 def _checklink(self):
178 return util.checklink(self._root)
178 return util.checklink(self._root)
179
179
180 @propertycache
180 @propertycache
181 def _checkexec(self):
181 def _checkexec(self):
182 return util.checkexec(self._root)
182 return util.checkexec(self._root)
183
183
184 @propertycache
184 @propertycache
185 def _checkcase(self):
185 def _checkcase(self):
186 return not util.fscasesensitive(self._join(b'.hg'))
186 return not util.fscasesensitive(self._join(b'.hg'))
187
187
188 def _join(self, f):
188 def _join(self, f):
189 # much faster than os.path.join()
189 # much faster than os.path.join()
190 # it's safe because f is always a relative path
190 # it's safe because f is always a relative path
191 return self._rootdir + f
191 return self._rootdir + f
192
192
193 def flagfunc(self, buildfallback):
193 def flagfunc(self, buildfallback):
194 if self._checklink and self._checkexec:
194 if self._checklink and self._checkexec:
195
195
196 def f(x):
196 def f(x):
197 try:
197 try:
198 st = os.lstat(self._join(x))
198 st = os.lstat(self._join(x))
199 if util.statislink(st):
199 if util.statislink(st):
200 return b'l'
200 return b'l'
201 if util.statisexec(st):
201 if util.statisexec(st):
202 return b'x'
202 return b'x'
203 except OSError:
203 except OSError:
204 pass
204 pass
205 return b''
205 return b''
206
206
207 return f
207 return f
208
208
209 fallback = buildfallback()
209 fallback = buildfallback()
210 if self._checklink:
210 if self._checklink:
211
211
212 def f(x):
212 def f(x):
213 if os.path.islink(self._join(x)):
213 if os.path.islink(self._join(x)):
214 return b'l'
214 return b'l'
215 if b'x' in fallback(x):
215 if b'x' in fallback(x):
216 return b'x'
216 return b'x'
217 return b''
217 return b''
218
218
219 return f
219 return f
220 if self._checkexec:
220 if self._checkexec:
221
221
222 def f(x):
222 def f(x):
223 if b'l' in fallback(x):
223 if b'l' in fallback(x):
224 return b'l'
224 return b'l'
225 if util.isexec(self._join(x)):
225 if util.isexec(self._join(x)):
226 return b'x'
226 return b'x'
227 return b''
227 return b''
228
228
229 return f
229 return f
230 else:
230 else:
231 return fallback
231 return fallback
232
232
233 @propertycache
233 @propertycache
234 def _cwd(self):
234 def _cwd(self):
235 # internal config: ui.forcecwd
235 # internal config: ui.forcecwd
236 forcecwd = self._ui.config(b'ui', b'forcecwd')
236 forcecwd = self._ui.config(b'ui', b'forcecwd')
237 if forcecwd:
237 if forcecwd:
238 return forcecwd
238 return forcecwd
239 return encoding.getcwd()
239 return encoding.getcwd()
240
240
241 def getcwd(self):
241 def getcwd(self):
242 '''Return the path from which a canonical path is calculated.
242 '''Return the path from which a canonical path is calculated.
243
243
244 This path should be used to resolve file patterns or to convert
244 This path should be used to resolve file patterns or to convert
245 canonical paths back to file paths for display. It shouldn't be
245 canonical paths back to file paths for display. It shouldn't be
246 used to get real file paths. Use vfs functions instead.
246 used to get real file paths. Use vfs functions instead.
247 '''
247 '''
248 cwd = self._cwd
248 cwd = self._cwd
249 if cwd == self._root:
249 if cwd == self._root:
250 return b''
250 return b''
251 # self._root ends with a path separator if self._root is '/' or 'C:\'
251 # self._root ends with a path separator if self._root is '/' or 'C:\'
252 rootsep = self._root
252 rootsep = self._root
253 if not util.endswithsep(rootsep):
253 if not util.endswithsep(rootsep):
254 rootsep += pycompat.ossep
254 rootsep += pycompat.ossep
255 if cwd.startswith(rootsep):
255 if cwd.startswith(rootsep):
256 return cwd[len(rootsep) :]
256 return cwd[len(rootsep) :]
257 else:
257 else:
258 # we're outside the repo. return an absolute path.
258 # we're outside the repo. return an absolute path.
259 return cwd
259 return cwd
260
260
261 def pathto(self, f, cwd=None):
261 def pathto(self, f, cwd=None):
262 if cwd is None:
262 if cwd is None:
263 cwd = self.getcwd()
263 cwd = self.getcwd()
264 path = util.pathto(self._root, cwd, f)
264 path = util.pathto(self._root, cwd, f)
265 if self._slash:
265 if self._slash:
266 return util.pconvert(path)
266 return util.pconvert(path)
267 return path
267 return path
268
268
269 def __getitem__(self, key):
269 def __getitem__(self, key):
270 '''Return the current state of key (a filename) in the dirstate.
270 '''Return the current state of key (a filename) in the dirstate.
271
271
272 States are:
272 States are:
273 n normal
273 n normal
274 m needs merging
274 m needs merging
275 r marked for removal
275 r marked for removal
276 a marked for addition
276 a marked for addition
277 ? not tracked
277 ? not tracked
278 '''
278 '''
279 return self._map.get(key, (b"?",))[0]
279 return self._map.get(key, (b"?",))[0]
280
280
281 def __contains__(self, key):
281 def __contains__(self, key):
282 return key in self._map
282 return key in self._map
283
283
284 def __iter__(self):
284 def __iter__(self):
285 return iter(sorted(self._map))
285 return iter(sorted(self._map))
286
286
287 def items(self):
287 def items(self):
288 return pycompat.iteritems(self._map)
288 return pycompat.iteritems(self._map)
289
289
290 iteritems = items
290 iteritems = items
291
291
292 def parents(self):
292 def parents(self):
293 return [self._validate(p) for p in self._pl]
293 return [self._validate(p) for p in self._pl]
294
294
295 def p1(self):
295 def p1(self):
296 return self._validate(self._pl[0])
296 return self._validate(self._pl[0])
297
297
298 def p2(self):
298 def p2(self):
299 return self._validate(self._pl[1])
299 return self._validate(self._pl[1])
300
300
301 def branch(self):
301 def branch(self):
302 return encoding.tolocal(self._branch)
302 return encoding.tolocal(self._branch)
303
303
304 def setparents(self, p1, p2=nullid):
304 def setparents(self, p1, p2=nullid):
305 """Set dirstate parents to p1 and p2.
305 """Set dirstate parents to p1 and p2.
306
306
307 When moving from two parents to one, 'm' merged entries a
307 When moving from two parents to one, 'm' merged entries a
308 adjusted to normal and previous copy records discarded and
308 adjusted to normal and previous copy records discarded and
309 returned by the call.
309 returned by the call.
310
310
311 See localrepo.setparents()
311 See localrepo.setparents()
312 """
312 """
313 if self._parentwriters == 0:
313 if self._parentwriters == 0:
314 raise ValueError(
314 raise ValueError(
315 b"cannot set dirstate parent outside of "
315 b"cannot set dirstate parent outside of "
316 b"dirstate.parentchange context manager"
316 b"dirstate.parentchange context manager"
317 )
317 )
318
318
319 self._dirty = True
319 self._dirty = True
320 oldp2 = self._pl[1]
320 oldp2 = self._pl[1]
321 if self._origpl is None:
321 if self._origpl is None:
322 self._origpl = self._pl
322 self._origpl = self._pl
323 self._map.setparents(p1, p2)
323 self._map.setparents(p1, p2)
324 copies = {}
324 copies = {}
325 if oldp2 != nullid and p2 == nullid:
325 if oldp2 != nullid and p2 == nullid:
326 candidatefiles = self._map.nonnormalset.union(
326 candidatefiles = self._map.nonnormalset.union(
327 self._map.otherparentset
327 self._map.otherparentset
328 )
328 )
329 for f in candidatefiles:
329 for f in candidatefiles:
330 s = self._map.get(f)
330 s = self._map.get(f)
331 if s is None:
331 if s is None:
332 continue
332 continue
333
333
334 # Discard 'm' markers when moving away from a merge state
334 # Discard 'm' markers when moving away from a merge state
335 if s[0] == b'm':
335 if s[0] == b'm':
336 source = self._map.copymap.get(f)
336 source = self._map.copymap.get(f)
337 if source:
337 if source:
338 copies[f] = source
338 copies[f] = source
339 self.normallookup(f)
339 self.normallookup(f)
340 # Also fix up otherparent markers
340 # Also fix up otherparent markers
341 elif s[0] == b'n' and s[2] == -2:
341 elif s[0] == b'n' and s[2] == -2:
342 source = self._map.copymap.get(f)
342 source = self._map.copymap.get(f)
343 if source:
343 if source:
344 copies[f] = source
344 copies[f] = source
345 self.add(f)
345 self.add(f)
346 return copies
346 return copies
347
347
348 def setbranch(self, branch):
348 def setbranch(self, branch):
349 self.__class__._branch.set(self, encoding.fromlocal(branch))
349 self.__class__._branch.set(self, encoding.fromlocal(branch))
350 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
350 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
351 try:
351 try:
352 f.write(self._branch + b'\n')
352 f.write(self._branch + b'\n')
353 f.close()
353 f.close()
354
354
355 # make sure filecache has the correct stat info for _branch after
355 # make sure filecache has the correct stat info for _branch after
356 # replacing the underlying file
356 # replacing the underlying file
357 ce = self._filecache[b'_branch']
357 ce = self._filecache[b'_branch']
358 if ce:
358 if ce:
359 ce.refresh()
359 ce.refresh()
360 except: # re-raises
360 except: # re-raises
361 f.discard()
361 f.discard()
362 raise
362 raise
363
363
364 def invalidate(self):
364 def invalidate(self):
365 '''Causes the next access to reread the dirstate.
365 '''Causes the next access to reread the dirstate.
366
366
367 This is different from localrepo.invalidatedirstate() because it always
367 This is different from localrepo.invalidatedirstate() because it always
368 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
368 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
369 check whether the dirstate has changed before rereading it.'''
369 check whether the dirstate has changed before rereading it.'''
370
370
371 for a in ("_map", "_branch", "_ignore"):
371 for a in ("_map", "_branch", "_ignore"):
372 if a in self.__dict__:
372 if a in self.__dict__:
373 delattr(self, a)
373 delattr(self, a)
374 self._lastnormaltime = 0
374 self._lastnormaltime = 0
375 self._dirty = False
375 self._dirty = False
376 self._updatedfiles.clear()
376 self._updatedfiles.clear()
377 self._parentwriters = 0
377 self._parentwriters = 0
378 self._origpl = None
378 self._origpl = None
379
379
380 def copy(self, source, dest):
380 def copy(self, source, dest):
381 """Mark dest as a copy of source. Unmark dest if source is None."""
381 """Mark dest as a copy of source. Unmark dest if source is None."""
382 if source == dest:
382 if source == dest:
383 return
383 return
384 self._dirty = True
384 self._dirty = True
385 if source is not None:
385 if source is not None:
386 self._map.copymap[dest] = source
386 self._map.copymap[dest] = source
387 self._updatedfiles.add(source)
387 self._updatedfiles.add(source)
388 self._updatedfiles.add(dest)
388 self._updatedfiles.add(dest)
389 elif self._map.copymap.pop(dest, None):
389 elif self._map.copymap.pop(dest, None):
390 self._updatedfiles.add(dest)
390 self._updatedfiles.add(dest)
391
391
392 def copied(self, file):
392 def copied(self, file):
393 return self._map.copymap.get(file, None)
393 return self._map.copymap.get(file, None)
394
394
395 def copies(self):
395 def copies(self):
396 return self._map.copymap
396 return self._map.copymap
397
397
398 def _addpath(self, f, state, mode, size, mtime):
398 def _addpath(self, f, state, mode, size, mtime):
399 oldstate = self[f]
399 oldstate = self[f]
400 if state == b'a' or oldstate == b'r':
400 if state == b'a' or oldstate == b'r':
401 scmutil.checkfilename(f)
401 scmutil.checkfilename(f)
402 if self._map.hastrackeddir(f):
402 if self._map.hastrackeddir(f):
403 raise error.Abort(
403 raise error.Abort(
404 _(b'directory %r already in dirstate') % pycompat.bytestr(f)
404 _(b'directory %r already in dirstate') % pycompat.bytestr(f)
405 )
405 )
406 # shadows
406 # shadows
407 for d in pathutil.finddirs(f):
407 for d in pathutil.finddirs(f):
408 if self._map.hastrackeddir(d):
408 if self._map.hastrackeddir(d):
409 break
409 break
410 entry = self._map.get(d)
410 entry = self._map.get(d)
411 if entry is not None and entry[0] != b'r':
411 if entry is not None and entry[0] != b'r':
412 raise error.Abort(
412 raise error.Abort(
413 _(b'file %r in dirstate clashes with %r')
413 _(b'file %r in dirstate clashes with %r')
414 % (pycompat.bytestr(d), pycompat.bytestr(f))
414 % (pycompat.bytestr(d), pycompat.bytestr(f))
415 )
415 )
416 self._dirty = True
416 self._dirty = True
417 self._updatedfiles.add(f)
417 self._updatedfiles.add(f)
418 self._map.addfile(f, oldstate, state, mode, size, mtime)
418 self._map.addfile(f, oldstate, state, mode, size, mtime)
419
419
420 def normal(self, f, parentfiledata=None):
420 def normal(self, f, parentfiledata=None):
421 '''Mark a file normal and clean.
421 '''Mark a file normal and clean.
422
422
423 parentfiledata: (mode, size, mtime) of the clean file
423 parentfiledata: (mode, size, mtime) of the clean file
424
424
425 parentfiledata should be computed from memory (for mode,
425 parentfiledata should be computed from memory (for mode,
426 size), as or close as possible from the point where we
426 size), as or close as possible from the point where we
427 determined the file was clean, to limit the risk of the
427 determined the file was clean, to limit the risk of the
428 file having been changed by an external process between the
428 file having been changed by an external process between the
429 moment where the file was determined to be clean and now.'''
429 moment where the file was determined to be clean and now.'''
430 if parentfiledata:
430 if parentfiledata:
431 (mode, size, mtime) = parentfiledata
431 (mode, size, mtime) = parentfiledata
432 else:
432 else:
433 s = os.lstat(self._join(f))
433 s = os.lstat(self._join(f))
434 mode = s.st_mode
434 mode = s.st_mode
435 size = s.st_size
435 size = s.st_size
436 mtime = s[stat.ST_MTIME]
436 mtime = s[stat.ST_MTIME]
437 self._addpath(f, b'n', mode, size & _rangemask, mtime & _rangemask)
437 self._addpath(f, b'n', mode, size & _rangemask, mtime & _rangemask)
438 self._map.copymap.pop(f, None)
438 self._map.copymap.pop(f, None)
439 if f in self._map.nonnormalset:
439 if f in self._map.nonnormalset:
440 self._map.nonnormalset.remove(f)
440 self._map.nonnormalset.remove(f)
441 if mtime > self._lastnormaltime:
441 if mtime > self._lastnormaltime:
442 # Remember the most recent modification timeslot for status(),
442 # Remember the most recent modification timeslot for status(),
443 # to make sure we won't miss future size-preserving file content
443 # to make sure we won't miss future size-preserving file content
444 # modifications that happen within the same timeslot.
444 # modifications that happen within the same timeslot.
445 self._lastnormaltime = mtime
445 self._lastnormaltime = mtime
446
446
447 def normallookup(self, f):
447 def normallookup(self, f):
448 '''Mark a file normal, but possibly dirty.'''
448 '''Mark a file normal, but possibly dirty.'''
449 if self._pl[1] != nullid:
449 if self._pl[1] != nullid:
450 # if there is a merge going on and the file was either
450 # if there is a merge going on and the file was either
451 # in state 'm' (-1) or coming from other parent (-2) before
451 # in state 'm' (-1) or coming from other parent (-2) before
452 # being removed, restore that state.
452 # being removed, restore that state.
453 entry = self._map.get(f)
453 entry = self._map.get(f)
454 if entry is not None:
454 if entry is not None:
455 if entry[0] == b'r' and entry[2] in (-1, -2):
455 if entry[0] == b'r' and entry[2] in (-1, -2):
456 source = self._map.copymap.get(f)
456 source = self._map.copymap.get(f)
457 if entry[2] == -1:
457 if entry[2] == -1:
458 self.merge(f)
458 self.merge(f)
459 elif entry[2] == -2:
459 elif entry[2] == -2:
460 self.otherparent(f)
460 self.otherparent(f)
461 if source:
461 if source:
462 self.copy(source, f)
462 self.copy(source, f)
463 return
463 return
464 if entry[0] == b'm' or entry[0] == b'n' and entry[2] == -2:
464 if entry[0] == b'm' or entry[0] == b'n' and entry[2] == -2:
465 return
465 return
466 self._addpath(f, b'n', 0, -1, -1)
466 self._addpath(f, b'n', 0, -1, -1)
467 self._map.copymap.pop(f, None)
467 self._map.copymap.pop(f, None)
468
468
469 def otherparent(self, f):
469 def otherparent(self, f):
470 '''Mark as coming from the other parent, always dirty.'''
470 '''Mark as coming from the other parent, always dirty.'''
471 if self._pl[1] == nullid:
471 if self._pl[1] == nullid:
472 raise error.Abort(
472 raise error.Abort(
473 _(b"setting %r to other parent only allowed in merges") % f
473 _(b"setting %r to other parent only allowed in merges") % f
474 )
474 )
475 if f in self and self[f] == b'n':
475 if f in self and self[f] == b'n':
476 # merge-like
476 # merge-like
477 self._addpath(f, b'm', 0, -2, -1)
477 self._addpath(f, b'm', 0, -2, -1)
478 else:
478 else:
479 # add-like
479 # add-like
480 self._addpath(f, b'n', 0, -2, -1)
480 self._addpath(f, b'n', 0, -2, -1)
481 self._map.copymap.pop(f, None)
481 self._map.copymap.pop(f, None)
482
482
483 def add(self, f):
483 def add(self, f):
484 '''Mark a file added.'''
484 '''Mark a file added.'''
485 self._addpath(f, b'a', 0, -1, -1)
485 self._addpath(f, b'a', 0, -1, -1)
486 self._map.copymap.pop(f, None)
486 self._map.copymap.pop(f, None)
487
487
488 def remove(self, f):
488 def remove(self, f):
489 '''Mark a file removed.'''
489 '''Mark a file removed.'''
490 self._dirty = True
490 self._dirty = True
491 oldstate = self[f]
491 oldstate = self[f]
492 size = 0
492 size = 0
493 if self._pl[1] != nullid:
493 if self._pl[1] != nullid:
494 entry = self._map.get(f)
494 entry = self._map.get(f)
495 if entry is not None:
495 if entry is not None:
496 # backup the previous state
496 # backup the previous state
497 if entry[0] == b'm': # merge
497 if entry[0] == b'm': # merge
498 size = -1
498 size = -1
499 elif entry[0] == b'n' and entry[2] == -2: # other parent
499 elif entry[0] == b'n' and entry[2] == -2: # other parent
500 size = -2
500 size = -2
501 self._map.otherparentset.add(f)
501 self._map.otherparentset.add(f)
502 self._updatedfiles.add(f)
502 self._updatedfiles.add(f)
503 self._map.removefile(f, oldstate, size)
503 self._map.removefile(f, oldstate, size)
504 if size == 0:
504 if size == 0:
505 self._map.copymap.pop(f, None)
505 self._map.copymap.pop(f, None)
506
506
507 def merge(self, f):
507 def merge(self, f):
508 '''Mark a file merged.'''
508 '''Mark a file merged.'''
509 if self._pl[1] == nullid:
509 if self._pl[1] == nullid:
510 return self.normallookup(f)
510 return self.normallookup(f)
511 return self.otherparent(f)
511 return self.otherparent(f)
512
512
513 def drop(self, f):
513 def drop(self, f):
514 '''Drop a file from the dirstate'''
514 '''Drop a file from the dirstate'''
515 oldstate = self[f]
515 oldstate = self[f]
516 if self._map.dropfile(f, oldstate):
516 if self._map.dropfile(f, oldstate):
517 self._dirty = True
517 self._dirty = True
518 self._updatedfiles.add(f)
518 self._updatedfiles.add(f)
519 self._map.copymap.pop(f, None)
519 self._map.copymap.pop(f, None)
520
520
521 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
521 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
522 if exists is None:
522 if exists is None:
523 exists = os.path.lexists(os.path.join(self._root, path))
523 exists = os.path.lexists(os.path.join(self._root, path))
524 if not exists:
524 if not exists:
525 # Maybe a path component exists
525 # Maybe a path component exists
526 if not ignoremissing and b'/' in path:
526 if not ignoremissing and b'/' in path:
527 d, f = path.rsplit(b'/', 1)
527 d, f = path.rsplit(b'/', 1)
528 d = self._normalize(d, False, ignoremissing, None)
528 d = self._normalize(d, False, ignoremissing, None)
529 folded = d + b"/" + f
529 folded = d + b"/" + f
530 else:
530 else:
531 # No path components, preserve original case
531 # No path components, preserve original case
532 folded = path
532 folded = path
533 else:
533 else:
534 # recursively normalize leading directory components
534 # recursively normalize leading directory components
535 # against dirstate
535 # against dirstate
536 if b'/' in normed:
536 if b'/' in normed:
537 d, f = normed.rsplit(b'/', 1)
537 d, f = normed.rsplit(b'/', 1)
538 d = self._normalize(d, False, ignoremissing, True)
538 d = self._normalize(d, False, ignoremissing, True)
539 r = self._root + b"/" + d
539 r = self._root + b"/" + d
540 folded = d + b"/" + util.fspath(f, r)
540 folded = d + b"/" + util.fspath(f, r)
541 else:
541 else:
542 folded = util.fspath(normed, self._root)
542 folded = util.fspath(normed, self._root)
543 storemap[normed] = folded
543 storemap[normed] = folded
544
544
545 return folded
545 return folded
546
546
547 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
547 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
548 normed = util.normcase(path)
548 normed = util.normcase(path)
549 folded = self._map.filefoldmap.get(normed, None)
549 folded = self._map.filefoldmap.get(normed, None)
550 if folded is None:
550 if folded is None:
551 if isknown:
551 if isknown:
552 folded = path
552 folded = path
553 else:
553 else:
554 folded = self._discoverpath(
554 folded = self._discoverpath(
555 path, normed, ignoremissing, exists, self._map.filefoldmap
555 path, normed, ignoremissing, exists, self._map.filefoldmap
556 )
556 )
557 return folded
557 return folded
558
558
559 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
559 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
560 normed = util.normcase(path)
560 normed = util.normcase(path)
561 folded = self._map.filefoldmap.get(normed, None)
561 folded = self._map.filefoldmap.get(normed, None)
562 if folded is None:
562 if folded is None:
563 folded = self._map.dirfoldmap.get(normed, None)
563 folded = self._map.dirfoldmap.get(normed, None)
564 if folded is None:
564 if folded is None:
565 if isknown:
565 if isknown:
566 folded = path
566 folded = path
567 else:
567 else:
568 # store discovered result in dirfoldmap so that future
568 # store discovered result in dirfoldmap so that future
569 # normalizefile calls don't start matching directories
569 # normalizefile calls don't start matching directories
570 folded = self._discoverpath(
570 folded = self._discoverpath(
571 path, normed, ignoremissing, exists, self._map.dirfoldmap
571 path, normed, ignoremissing, exists, self._map.dirfoldmap
572 )
572 )
573 return folded
573 return folded
574
574
575 def normalize(self, path, isknown=False, ignoremissing=False):
575 def normalize(self, path, isknown=False, ignoremissing=False):
576 '''
576 '''
577 normalize the case of a pathname when on a casefolding filesystem
577 normalize the case of a pathname when on a casefolding filesystem
578
578
579 isknown specifies whether the filename came from walking the
579 isknown specifies whether the filename came from walking the
580 disk, to avoid extra filesystem access.
580 disk, to avoid extra filesystem access.
581
581
582 If ignoremissing is True, missing path are returned
582 If ignoremissing is True, missing path are returned
583 unchanged. Otherwise, we try harder to normalize possibly
583 unchanged. Otherwise, we try harder to normalize possibly
584 existing path components.
584 existing path components.
585
585
586 The normalized case is determined based on the following precedence:
586 The normalized case is determined based on the following precedence:
587
587
588 - version of name already stored in the dirstate
588 - version of name already stored in the dirstate
589 - version of name stored on disk
589 - version of name stored on disk
590 - version provided via command arguments
590 - version provided via command arguments
591 '''
591 '''
592
592
593 if self._checkcase:
593 if self._checkcase:
594 return self._normalize(path, isknown, ignoremissing)
594 return self._normalize(path, isknown, ignoremissing)
595 return path
595 return path
596
596
597 def clear(self):
597 def clear(self):
598 self._map.clear()
598 self._map.clear()
599 self._lastnormaltime = 0
599 self._lastnormaltime = 0
600 self._updatedfiles.clear()
600 self._updatedfiles.clear()
601 self._dirty = True
601 self._dirty = True
602
602
603 def rebuild(self, parent, allfiles, changedfiles=None):
603 def rebuild(self, parent, allfiles, changedfiles=None):
604 if changedfiles is None:
604 if changedfiles is None:
605 # Rebuild entire dirstate
605 # Rebuild entire dirstate
606 changedfiles = allfiles
606 changedfiles = allfiles
607 lastnormaltime = self._lastnormaltime
607 lastnormaltime = self._lastnormaltime
608 self.clear()
608 self.clear()
609 self._lastnormaltime = lastnormaltime
609 self._lastnormaltime = lastnormaltime
610
610
611 if self._origpl is None:
611 if self._origpl is None:
612 self._origpl = self._pl
612 self._origpl = self._pl
613 self._map.setparents(parent, nullid)
613 self._map.setparents(parent, nullid)
614 for f in changedfiles:
614 for f in changedfiles:
615 if f in allfiles:
615 if f in allfiles:
616 self.normallookup(f)
616 self.normallookup(f)
617 else:
617 else:
618 self.drop(f)
618 self.drop(f)
619
619
620 self._dirty = True
620 self._dirty = True
621
621
622 def identity(self):
622 def identity(self):
623 '''Return identity of dirstate itself to detect changing in storage
623 '''Return identity of dirstate itself to detect changing in storage
624
624
625 If identity of previous dirstate is equal to this, writing
625 If identity of previous dirstate is equal to this, writing
626 changes based on the former dirstate out can keep consistency.
626 changes based on the former dirstate out can keep consistency.
627 '''
627 '''
628 return self._map.identity
628 return self._map.identity
629
629
630 def write(self, tr):
630 def write(self, tr):
631 if not self._dirty:
631 if not self._dirty:
632 return
632 return
633
633
634 filename = self._filename
634 filename = self._filename
635 if tr:
635 if tr:
636 # 'dirstate.write()' is not only for writing in-memory
636 # 'dirstate.write()' is not only for writing in-memory
637 # changes out, but also for dropping ambiguous timestamp.
637 # changes out, but also for dropping ambiguous timestamp.
638 # delayed writing re-raise "ambiguous timestamp issue".
638 # delayed writing re-raise "ambiguous timestamp issue".
639 # See also the wiki page below for detail:
639 # See also the wiki page below for detail:
640 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
640 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
641
641
642 # emulate dropping timestamp in 'parsers.pack_dirstate'
642 # emulate dropping timestamp in 'parsers.pack_dirstate'
643 now = _getfsnow(self._opener)
643 now = _getfsnow(self._opener)
644 self._map.clearambiguoustimes(self._updatedfiles, now)
644 self._map.clearambiguoustimes(self._updatedfiles, now)
645
645
646 # emulate that all 'dirstate.normal' results are written out
646 # emulate that all 'dirstate.normal' results are written out
647 self._lastnormaltime = 0
647 self._lastnormaltime = 0
648 self._updatedfiles.clear()
648 self._updatedfiles.clear()
649
649
650 # delay writing in-memory changes out
650 # delay writing in-memory changes out
651 tr.addfilegenerator(
651 tr.addfilegenerator(
652 b'dirstate',
652 b'dirstate',
653 (self._filename,),
653 (self._filename,),
654 self._writedirstate,
654 self._writedirstate,
655 location=b'plain',
655 location=b'plain',
656 )
656 )
657 return
657 return
658
658
659 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
659 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
660 self._writedirstate(st)
660 self._writedirstate(st)
661
661
662 def addparentchangecallback(self, category, callback):
662 def addparentchangecallback(self, category, callback):
663 """add a callback to be called when the wd parents are changed
663 """add a callback to be called when the wd parents are changed
664
664
665 Callback will be called with the following arguments:
665 Callback will be called with the following arguments:
666 dirstate, (oldp1, oldp2), (newp1, newp2)
666 dirstate, (oldp1, oldp2), (newp1, newp2)
667
667
668 Category is a unique identifier to allow overwriting an old callback
668 Category is a unique identifier to allow overwriting an old callback
669 with a newer callback.
669 with a newer callback.
670 """
670 """
671 self._plchangecallbacks[category] = callback
671 self._plchangecallbacks[category] = callback
672
672
673 def _writedirstate(self, st):
673 def _writedirstate(self, st):
674 # notify callbacks about parents change
674 # notify callbacks about parents change
675 if self._origpl is not None and self._origpl != self._pl:
675 if self._origpl is not None and self._origpl != self._pl:
676 for c, callback in sorted(
676 for c, callback in sorted(
677 pycompat.iteritems(self._plchangecallbacks)
677 pycompat.iteritems(self._plchangecallbacks)
678 ):
678 ):
679 callback(self, self._origpl, self._pl)
679 callback(self, self._origpl, self._pl)
680 self._origpl = None
680 self._origpl = None
681 # use the modification time of the newly created temporary file as the
681 # use the modification time of the newly created temporary file as the
682 # filesystem's notion of 'now'
682 # filesystem's notion of 'now'
683 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
683 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
684
684
685 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
685 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
686 # timestamp of each entries in dirstate, because of 'now > mtime'
686 # timestamp of each entries in dirstate, because of 'now > mtime'
687 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
687 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
688 if delaywrite > 0:
688 if delaywrite > 0:
689 # do we have any files to delay for?
689 # do we have any files to delay for?
690 for f, e in pycompat.iteritems(self._map):
690 for f, e in pycompat.iteritems(self._map):
691 if e[0] == b'n' and e[3] == now:
691 if e[0] == b'n' and e[3] == now:
692 import time # to avoid useless import
692 import time # to avoid useless import
693
693
694 # rather than sleep n seconds, sleep until the next
694 # rather than sleep n seconds, sleep until the next
695 # multiple of n seconds
695 # multiple of n seconds
696 clock = time.time()
696 clock = time.time()
697 start = int(clock) - (int(clock) % delaywrite)
697 start = int(clock) - (int(clock) % delaywrite)
698 end = start + delaywrite
698 end = start + delaywrite
699 time.sleep(end - clock)
699 time.sleep(end - clock)
700 now = end # trust our estimate that the end is near now
700 now = end # trust our estimate that the end is near now
701 break
701 break
702
702
703 self._map.write(st, now)
703 self._map.write(st, now)
704 self._lastnormaltime = 0
704 self._lastnormaltime = 0
705 self._dirty = False
705 self._dirty = False
706
706
707 def _dirignore(self, f):
707 def _dirignore(self, f):
708 if self._ignore(f):
708 if self._ignore(f):
709 return True
709 return True
710 for p in pathutil.finddirs(f):
710 for p in pathutil.finddirs(f):
711 if self._ignore(p):
711 if self._ignore(p):
712 return True
712 return True
713 return False
713 return False
714
714
715 def _ignorefiles(self):
715 def _ignorefiles(self):
716 files = []
716 files = []
717 if os.path.exists(self._join(b'.hgignore')):
717 if os.path.exists(self._join(b'.hgignore')):
718 files.append(self._join(b'.hgignore'))
718 files.append(self._join(b'.hgignore'))
719 for name, path in self._ui.configitems(b"ui"):
719 for name, path in self._ui.configitems(b"ui"):
720 if name == b'ignore' or name.startswith(b'ignore.'):
720 if name == b'ignore' or name.startswith(b'ignore.'):
721 # we need to use os.path.join here rather than self._join
721 # we need to use os.path.join here rather than self._join
722 # because path is arbitrary and user-specified
722 # because path is arbitrary and user-specified
723 files.append(os.path.join(self._rootdir, util.expandpath(path)))
723 files.append(os.path.join(self._rootdir, util.expandpath(path)))
724 return files
724 return files
725
725
726 def _ignorefileandline(self, f):
726 def _ignorefileandline(self, f):
727 files = collections.deque(self._ignorefiles())
727 files = collections.deque(self._ignorefiles())
728 visited = set()
728 visited = set()
729 while files:
729 while files:
730 i = files.popleft()
730 i = files.popleft()
731 patterns = matchmod.readpatternfile(
731 patterns = matchmod.readpatternfile(
732 i, self._ui.warn, sourceinfo=True
732 i, self._ui.warn, sourceinfo=True
733 )
733 )
734 for pattern, lineno, line in patterns:
734 for pattern, lineno, line in patterns:
735 kind, p = matchmod._patsplit(pattern, b'glob')
735 kind, p = matchmod._patsplit(pattern, b'glob')
736 if kind == b"subinclude":
736 if kind == b"subinclude":
737 if p not in visited:
737 if p not in visited:
738 files.append(p)
738 files.append(p)
739 continue
739 continue
740 m = matchmod.match(
740 m = matchmod.match(
741 self._root, b'', [], [pattern], warn=self._ui.warn
741 self._root, b'', [], [pattern], warn=self._ui.warn
742 )
742 )
743 if m(f):
743 if m(f):
744 return (i, lineno, line)
744 return (i, lineno, line)
745 visited.add(i)
745 visited.add(i)
746 return (None, -1, b"")
746 return (None, -1, b"")
747
747
748 def _walkexplicit(self, match, subrepos):
748 def _walkexplicit(self, match, subrepos):
749 '''Get stat data about the files explicitly specified by match.
749 '''Get stat data about the files explicitly specified by match.
750
750
751 Return a triple (results, dirsfound, dirsnotfound).
751 Return a triple (results, dirsfound, dirsnotfound).
752 - results is a mapping from filename to stat result. It also contains
752 - results is a mapping from filename to stat result. It also contains
753 listings mapping subrepos and .hg to None.
753 listings mapping subrepos and .hg to None.
754 - dirsfound is a list of files found to be directories.
754 - dirsfound is a list of files found to be directories.
755 - dirsnotfound is a list of files that the dirstate thinks are
755 - dirsnotfound is a list of files that the dirstate thinks are
756 directories and that were not found.'''
756 directories and that were not found.'''
757
757
758 def badtype(mode):
758 def badtype(mode):
759 kind = _(b'unknown')
759 kind = _(b'unknown')
760 if stat.S_ISCHR(mode):
760 if stat.S_ISCHR(mode):
761 kind = _(b'character device')
761 kind = _(b'character device')
762 elif stat.S_ISBLK(mode):
762 elif stat.S_ISBLK(mode):
763 kind = _(b'block device')
763 kind = _(b'block device')
764 elif stat.S_ISFIFO(mode):
764 elif stat.S_ISFIFO(mode):
765 kind = _(b'fifo')
765 kind = _(b'fifo')
766 elif stat.S_ISSOCK(mode):
766 elif stat.S_ISSOCK(mode):
767 kind = _(b'socket')
767 kind = _(b'socket')
768 elif stat.S_ISDIR(mode):
768 elif stat.S_ISDIR(mode):
769 kind = _(b'directory')
769 kind = _(b'directory')
770 return _(b'unsupported file type (type is %s)') % kind
770 return _(b'unsupported file type (type is %s)') % kind
771
771
772 matchedir = match.explicitdir
772 matchedir = match.explicitdir
773 badfn = match.bad
773 badfn = match.bad
774 dmap = self._map
774 dmap = self._map
775 lstat = os.lstat
775 lstat = os.lstat
776 getkind = stat.S_IFMT
776 getkind = stat.S_IFMT
777 dirkind = stat.S_IFDIR
777 dirkind = stat.S_IFDIR
778 regkind = stat.S_IFREG
778 regkind = stat.S_IFREG
779 lnkkind = stat.S_IFLNK
779 lnkkind = stat.S_IFLNK
780 join = self._join
780 join = self._join
781 dirsfound = []
781 dirsfound = []
782 foundadd = dirsfound.append
782 foundadd = dirsfound.append
783 dirsnotfound = []
783 dirsnotfound = []
784 notfoundadd = dirsnotfound.append
784 notfoundadd = dirsnotfound.append
785
785
786 if not match.isexact() and self._checkcase:
786 if not match.isexact() and self._checkcase:
787 normalize = self._normalize
787 normalize = self._normalize
788 else:
788 else:
789 normalize = None
789 normalize = None
790
790
791 files = sorted(match.files())
791 files = sorted(match.files())
792 subrepos.sort()
792 subrepos.sort()
793 i, j = 0, 0
793 i, j = 0, 0
794 while i < len(files) and j < len(subrepos):
794 while i < len(files) and j < len(subrepos):
795 subpath = subrepos[j] + b"/"
795 subpath = subrepos[j] + b"/"
796 if files[i] < subpath:
796 if files[i] < subpath:
797 i += 1
797 i += 1
798 continue
798 continue
799 while i < len(files) and files[i].startswith(subpath):
799 while i < len(files) and files[i].startswith(subpath):
800 del files[i]
800 del files[i]
801 j += 1
801 j += 1
802
802
803 if not files or b'' in files:
803 if not files or b'' in files:
804 files = [b'']
804 files = [b'']
805 # constructing the foldmap is expensive, so don't do it for the
805 # constructing the foldmap is expensive, so don't do it for the
806 # common case where files is ['']
806 # common case where files is ['']
807 normalize = None
807 normalize = None
808 results = dict.fromkeys(subrepos)
808 results = dict.fromkeys(subrepos)
809 results[b'.hg'] = None
809 results[b'.hg'] = None
810
810
811 for ff in files:
811 for ff in files:
812 if normalize:
812 if normalize:
813 nf = normalize(ff, False, True)
813 nf = normalize(ff, False, True)
814 else:
814 else:
815 nf = ff
815 nf = ff
816 if nf in results:
816 if nf in results:
817 continue
817 continue
818
818
819 try:
819 try:
820 st = lstat(join(nf))
820 st = lstat(join(nf))
821 kind = getkind(st.st_mode)
821 kind = getkind(st.st_mode)
822 if kind == dirkind:
822 if kind == dirkind:
823 if nf in dmap:
823 if nf in dmap:
824 # file replaced by dir on disk but still in dirstate
824 # file replaced by dir on disk but still in dirstate
825 results[nf] = None
825 results[nf] = None
826 if matchedir:
826 if matchedir:
827 matchedir(nf)
827 matchedir(nf)
828 foundadd((nf, ff))
828 foundadd((nf, ff))
829 elif kind == regkind or kind == lnkkind:
829 elif kind == regkind or kind == lnkkind:
830 results[nf] = st
830 results[nf] = st
831 else:
831 else:
832 badfn(ff, badtype(kind))
832 badfn(ff, badtype(kind))
833 if nf in dmap:
833 if nf in dmap:
834 results[nf] = None
834 results[nf] = None
835 except OSError as inst: # nf not found on disk - it is dirstate only
835 except OSError as inst: # nf not found on disk - it is dirstate only
836 if nf in dmap: # does it exactly match a missing file?
836 if nf in dmap: # does it exactly match a missing file?
837 results[nf] = None
837 results[nf] = None
838 else: # does it match a missing directory?
838 else: # does it match a missing directory?
839 if self._map.hasdir(nf):
839 if self._map.hasdir(nf):
840 if matchedir:
840 if matchedir:
841 matchedir(nf)
841 matchedir(nf)
842 notfoundadd(nf)
842 notfoundadd(nf)
843 else:
843 else:
844 badfn(ff, encoding.strtolocal(inst.strerror))
844 badfn(ff, encoding.strtolocal(inst.strerror))
845
845
846 # match.files() may contain explicitly-specified paths that shouldn't
846 # match.files() may contain explicitly-specified paths that shouldn't
847 # be taken; drop them from the list of files found. dirsfound/notfound
847 # be taken; drop them from the list of files found. dirsfound/notfound
848 # aren't filtered here because they will be tested later.
848 # aren't filtered here because they will be tested later.
849 if match.anypats():
849 if match.anypats():
850 for f in list(results):
850 for f in list(results):
851 if f == b'.hg' or f in subrepos:
851 if f == b'.hg' or f in subrepos:
852 # keep sentinel to disable further out-of-repo walks
852 # keep sentinel to disable further out-of-repo walks
853 continue
853 continue
854 if not match(f):
854 if not match(f):
855 del results[f]
855 del results[f]
856
856
857 # Case insensitive filesystems cannot rely on lstat() failing to detect
857 # Case insensitive filesystems cannot rely on lstat() failing to detect
858 # a case-only rename. Prune the stat object for any file that does not
858 # a case-only rename. Prune the stat object for any file that does not
859 # match the case in the filesystem, if there are multiple files that
859 # match the case in the filesystem, if there are multiple files that
860 # normalize to the same path.
860 # normalize to the same path.
861 if match.isexact() and self._checkcase:
861 if match.isexact() and self._checkcase:
862 normed = {}
862 normed = {}
863
863
864 for f, st in pycompat.iteritems(results):
864 for f, st in pycompat.iteritems(results):
865 if st is None:
865 if st is None:
866 continue
866 continue
867
867
868 nc = util.normcase(f)
868 nc = util.normcase(f)
869 paths = normed.get(nc)
869 paths = normed.get(nc)
870
870
871 if paths is None:
871 if paths is None:
872 paths = set()
872 paths = set()
873 normed[nc] = paths
873 normed[nc] = paths
874
874
875 paths.add(f)
875 paths.add(f)
876
876
877 for norm, paths in pycompat.iteritems(normed):
877 for norm, paths in pycompat.iteritems(normed):
878 if len(paths) > 1:
878 if len(paths) > 1:
879 for path in paths:
879 for path in paths:
880 folded = self._discoverpath(
880 folded = self._discoverpath(
881 path, norm, True, None, self._map.dirfoldmap
881 path, norm, True, None, self._map.dirfoldmap
882 )
882 )
883 if path != folded:
883 if path != folded:
884 results[path] = None
884 results[path] = None
885
885
886 return results, dirsfound, dirsnotfound
886 return results, dirsfound, dirsnotfound
887
887
888 def walk(self, match, subrepos, unknown, ignored, full=True):
888 def walk(self, match, subrepos, unknown, ignored, full=True):
889 '''
889 '''
890 Walk recursively through the directory tree, finding all files
890 Walk recursively through the directory tree, finding all files
891 matched by match.
891 matched by match.
892
892
893 If full is False, maybe skip some known-clean files.
893 If full is False, maybe skip some known-clean files.
894
894
895 Return a dict mapping filename to stat-like object (either
895 Return a dict mapping filename to stat-like object (either
896 mercurial.osutil.stat instance or return value of os.stat()).
896 mercurial.osutil.stat instance or return value of os.stat()).
897
897
898 '''
898 '''
899 # full is a flag that extensions that hook into walk can use -- this
899 # full is a flag that extensions that hook into walk can use -- this
900 # implementation doesn't use it at all. This satisfies the contract
900 # implementation doesn't use it at all. This satisfies the contract
901 # because we only guarantee a "maybe".
901 # because we only guarantee a "maybe".
902
902
903 if ignored:
903 if ignored:
904 ignore = util.never
904 ignore = util.never
905 dirignore = util.never
905 dirignore = util.never
906 elif unknown:
906 elif unknown:
907 ignore = self._ignore
907 ignore = self._ignore
908 dirignore = self._dirignore
908 dirignore = self._dirignore
909 else:
909 else:
910 # if not unknown and not ignored, drop dir recursion and step 2
910 # if not unknown and not ignored, drop dir recursion and step 2
911 ignore = util.always
911 ignore = util.always
912 dirignore = util.always
912 dirignore = util.always
913
913
914 matchfn = match.matchfn
914 matchfn = match.matchfn
915 matchalways = match.always()
915 matchalways = match.always()
916 matchtdir = match.traversedir
916 matchtdir = match.traversedir
917 dmap = self._map
917 dmap = self._map
918 listdir = util.listdir
918 listdir = util.listdir
919 lstat = os.lstat
919 lstat = os.lstat
920 dirkind = stat.S_IFDIR
920 dirkind = stat.S_IFDIR
921 regkind = stat.S_IFREG
921 regkind = stat.S_IFREG
922 lnkkind = stat.S_IFLNK
922 lnkkind = stat.S_IFLNK
923 join = self._join
923 join = self._join
924
924
925 exact = skipstep3 = False
925 exact = skipstep3 = False
926 if match.isexact(): # match.exact
926 if match.isexact(): # match.exact
927 exact = True
927 exact = True
928 dirignore = util.always # skip step 2
928 dirignore = util.always # skip step 2
929 elif match.prefix(): # match.match, no patterns
929 elif match.prefix(): # match.match, no patterns
930 skipstep3 = True
930 skipstep3 = True
931
931
932 if not exact and self._checkcase:
932 if not exact and self._checkcase:
933 normalize = self._normalize
933 normalize = self._normalize
934 normalizefile = self._normalizefile
934 normalizefile = self._normalizefile
935 skipstep3 = False
935 skipstep3 = False
936 else:
936 else:
937 normalize = self._normalize
937 normalize = self._normalize
938 normalizefile = None
938 normalizefile = None
939
939
940 # step 1: find all explicit files
940 # step 1: find all explicit files
941 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
941 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
942 if matchtdir:
943 for d in work:
944 matchtdir(d[0])
945 for d in dirsnotfound:
946 matchtdir(d)
942
947
943 skipstep3 = skipstep3 and not (work or dirsnotfound)
948 skipstep3 = skipstep3 and not (work or dirsnotfound)
944 work = [d for d in work if not dirignore(d[0])]
949 work = [d for d in work if not dirignore(d[0])]
945
950
946 # step 2: visit subdirectories
951 # step 2: visit subdirectories
947 def traverse(work, alreadynormed):
952 def traverse(work, alreadynormed):
948 wadd = work.append
953 wadd = work.append
949 while work:
954 while work:
950 tracing.counter('dirstate.walk work', len(work))
955 tracing.counter('dirstate.walk work', len(work))
951 nd = work.pop()
956 nd = work.pop()
952 visitentries = match.visitchildrenset(nd)
957 visitentries = match.visitchildrenset(nd)
953 if not visitentries:
958 if not visitentries:
954 continue
959 continue
955 if visitentries == b'this' or visitentries == b'all':
960 if visitentries == b'this' or visitentries == b'all':
956 visitentries = None
961 visitentries = None
957 skip = None
962 skip = None
958 if nd != b'':
963 if nd != b'':
959 skip = b'.hg'
964 skip = b'.hg'
960 try:
965 try:
961 with tracing.log('dirstate.walk.traverse listdir %s', nd):
966 with tracing.log('dirstate.walk.traverse listdir %s', nd):
962 entries = listdir(join(nd), stat=True, skip=skip)
967 entries = listdir(join(nd), stat=True, skip=skip)
963 except OSError as inst:
968 except OSError as inst:
964 if inst.errno in (errno.EACCES, errno.ENOENT):
969 if inst.errno in (errno.EACCES, errno.ENOENT):
965 match.bad(
970 match.bad(
966 self.pathto(nd), encoding.strtolocal(inst.strerror)
971 self.pathto(nd), encoding.strtolocal(inst.strerror)
967 )
972 )
968 continue
973 continue
969 raise
974 raise
970 for f, kind, st in entries:
975 for f, kind, st in entries:
971 # Some matchers may return files in the visitentries set,
976 # Some matchers may return files in the visitentries set,
972 # instead of 'this', if the matcher explicitly mentions them
977 # instead of 'this', if the matcher explicitly mentions them
973 # and is not an exactmatcher. This is acceptable; we do not
978 # and is not an exactmatcher. This is acceptable; we do not
974 # make any hard assumptions about file-or-directory below
979 # make any hard assumptions about file-or-directory below
975 # based on the presence of `f` in visitentries. If
980 # based on the presence of `f` in visitentries. If
976 # visitchildrenset returned a set, we can always skip the
981 # visitchildrenset returned a set, we can always skip the
977 # entries *not* in the set it provided regardless of whether
982 # entries *not* in the set it provided regardless of whether
978 # they're actually a file or a directory.
983 # they're actually a file or a directory.
979 if visitentries and f not in visitentries:
984 if visitentries and f not in visitentries:
980 continue
985 continue
981 if normalizefile:
986 if normalizefile:
982 # even though f might be a directory, we're only
987 # even though f might be a directory, we're only
983 # interested in comparing it to files currently in the
988 # interested in comparing it to files currently in the
984 # dmap -- therefore normalizefile is enough
989 # dmap -- therefore normalizefile is enough
985 nf = normalizefile(
990 nf = normalizefile(
986 nd and (nd + b"/" + f) or f, True, True
991 nd and (nd + b"/" + f) or f, True, True
987 )
992 )
988 else:
993 else:
989 nf = nd and (nd + b"/" + f) or f
994 nf = nd and (nd + b"/" + f) or f
990 if nf not in results:
995 if nf not in results:
991 if kind == dirkind:
996 if kind == dirkind:
992 if not ignore(nf):
997 if not ignore(nf):
993 if matchtdir:
998 if matchtdir:
994 matchtdir(nf)
999 matchtdir(nf)
995 wadd(nf)
1000 wadd(nf)
996 if nf in dmap and (matchalways or matchfn(nf)):
1001 if nf in dmap and (matchalways or matchfn(nf)):
997 results[nf] = None
1002 results[nf] = None
998 elif kind == regkind or kind == lnkkind:
1003 elif kind == regkind or kind == lnkkind:
999 if nf in dmap:
1004 if nf in dmap:
1000 if matchalways or matchfn(nf):
1005 if matchalways or matchfn(nf):
1001 results[nf] = st
1006 results[nf] = st
1002 elif (matchalways or matchfn(nf)) and not ignore(
1007 elif (matchalways or matchfn(nf)) and not ignore(
1003 nf
1008 nf
1004 ):
1009 ):
1005 # unknown file -- normalize if necessary
1010 # unknown file -- normalize if necessary
1006 if not alreadynormed:
1011 if not alreadynormed:
1007 nf = normalize(nf, False, True)
1012 nf = normalize(nf, False, True)
1008 results[nf] = st
1013 results[nf] = st
1009 elif nf in dmap and (matchalways or matchfn(nf)):
1014 elif nf in dmap and (matchalways or matchfn(nf)):
1010 results[nf] = None
1015 results[nf] = None
1011
1016
1012 for nd, d in work:
1017 for nd, d in work:
1013 # alreadynormed means that processwork doesn't have to do any
1018 # alreadynormed means that processwork doesn't have to do any
1014 # expensive directory normalization
1019 # expensive directory normalization
1015 alreadynormed = not normalize or nd == d
1020 alreadynormed = not normalize or nd == d
1016 traverse([d], alreadynormed)
1021 traverse([d], alreadynormed)
1017
1022
1018 for s in subrepos:
1023 for s in subrepos:
1019 del results[s]
1024 del results[s]
1020 del results[b'.hg']
1025 del results[b'.hg']
1021
1026
1022 # step 3: visit remaining files from dmap
1027 # step 3: visit remaining files from dmap
1023 if not skipstep3 and not exact:
1028 if not skipstep3 and not exact:
1024 # If a dmap file is not in results yet, it was either
1029 # If a dmap file is not in results yet, it was either
1025 # a) not matching matchfn b) ignored, c) missing, or d) under a
1030 # a) not matching matchfn b) ignored, c) missing, or d) under a
1026 # symlink directory.
1031 # symlink directory.
1027 if not results and matchalways:
1032 if not results and matchalways:
1028 visit = [f for f in dmap]
1033 visit = [f for f in dmap]
1029 else:
1034 else:
1030 visit = [f for f in dmap if f not in results and matchfn(f)]
1035 visit = [f for f in dmap if f not in results and matchfn(f)]
1031 visit.sort()
1036 visit.sort()
1032
1037
1033 if unknown:
1038 if unknown:
1034 # unknown == True means we walked all dirs under the roots
1039 # unknown == True means we walked all dirs under the roots
1035 # that wasn't ignored, and everything that matched was stat'ed
1040 # that wasn't ignored, and everything that matched was stat'ed
1036 # and is already in results.
1041 # and is already in results.
1037 # The rest must thus be ignored or under a symlink.
1042 # The rest must thus be ignored or under a symlink.
1038 audit_path = pathutil.pathauditor(self._root, cached=True)
1043 audit_path = pathutil.pathauditor(self._root, cached=True)
1039
1044
1040 for nf in iter(visit):
1045 for nf in iter(visit):
1041 # If a stat for the same file was already added with a
1046 # If a stat for the same file was already added with a
1042 # different case, don't add one for this, since that would
1047 # different case, don't add one for this, since that would
1043 # make it appear as if the file exists under both names
1048 # make it appear as if the file exists under both names
1044 # on disk.
1049 # on disk.
1045 if (
1050 if (
1046 normalizefile
1051 normalizefile
1047 and normalizefile(nf, True, True) in results
1052 and normalizefile(nf, True, True) in results
1048 ):
1053 ):
1049 results[nf] = None
1054 results[nf] = None
1050 # Report ignored items in the dmap as long as they are not
1055 # Report ignored items in the dmap as long as they are not
1051 # under a symlink directory.
1056 # under a symlink directory.
1052 elif audit_path.check(nf):
1057 elif audit_path.check(nf):
1053 try:
1058 try:
1054 results[nf] = lstat(join(nf))
1059 results[nf] = lstat(join(nf))
1055 # file was just ignored, no links, and exists
1060 # file was just ignored, no links, and exists
1056 except OSError:
1061 except OSError:
1057 # file doesn't exist
1062 # file doesn't exist
1058 results[nf] = None
1063 results[nf] = None
1059 else:
1064 else:
1060 # It's either missing or under a symlink directory
1065 # It's either missing or under a symlink directory
1061 # which we in this case report as missing
1066 # which we in this case report as missing
1062 results[nf] = None
1067 results[nf] = None
1063 else:
1068 else:
1064 # We may not have walked the full directory tree above,
1069 # We may not have walked the full directory tree above,
1065 # so stat and check everything we missed.
1070 # so stat and check everything we missed.
1066 iv = iter(visit)
1071 iv = iter(visit)
1067 for st in util.statfiles([join(i) for i in visit]):
1072 for st in util.statfiles([join(i) for i in visit]):
1068 results[next(iv)] = st
1073 results[next(iv)] = st
1069 return results
1074 return results
1070
1075
1071 def status(self, match, subrepos, ignored, clean, unknown):
1076 def status(self, match, subrepos, ignored, clean, unknown):
1072 '''Determine the status of the working copy relative to the
1077 '''Determine the status of the working copy relative to the
1073 dirstate and return a pair of (unsure, status), where status is of type
1078 dirstate and return a pair of (unsure, status), where status is of type
1074 scmutil.status and:
1079 scmutil.status and:
1075
1080
1076 unsure:
1081 unsure:
1077 files that might have been modified since the dirstate was
1082 files that might have been modified since the dirstate was
1078 written, but need to be read to be sure (size is the same
1083 written, but need to be read to be sure (size is the same
1079 but mtime differs)
1084 but mtime differs)
1080 status.modified:
1085 status.modified:
1081 files that have definitely been modified since the dirstate
1086 files that have definitely been modified since the dirstate
1082 was written (different size or mode)
1087 was written (different size or mode)
1083 status.clean:
1088 status.clean:
1084 files that have definitely not been modified since the
1089 files that have definitely not been modified since the
1085 dirstate was written
1090 dirstate was written
1086 '''
1091 '''
1087 listignored, listclean, listunknown = ignored, clean, unknown
1092 listignored, listclean, listunknown = ignored, clean, unknown
1088 lookup, modified, added, unknown, ignored = [], [], [], [], []
1093 lookup, modified, added, unknown, ignored = [], [], [], [], []
1089 removed, deleted, clean = [], [], []
1094 removed, deleted, clean = [], [], []
1090
1095
1091 dmap = self._map
1096 dmap = self._map
1092 dmap.preload()
1097 dmap.preload()
1093
1098
1094 use_rust = True
1099 use_rust = True
1095 if rustmod is None:
1100 if rustmod is None:
1096 use_rust = False
1101 use_rust = False
1097 elif subrepos:
1102 elif subrepos:
1098 use_rust = False
1103 use_rust = False
1099 if bool(listunknown):
1104 if bool(listunknown):
1100 # Pathauditor does not exist yet in Rust, unknown files
1105 # Pathauditor does not exist yet in Rust, unknown files
1101 # can't be trusted.
1106 # can't be trusted.
1102 use_rust = False
1107 use_rust = False
1103 elif self._ignorefiles() and listignored:
1108 elif self._ignorefiles() and listignored:
1104 # Rust has no ignore mechanism yet, so don't use Rust for
1109 # Rust has no ignore mechanism yet, so don't use Rust for
1105 # commands that need ignore.
1110 # commands that need ignore.
1106 use_rust = False
1111 use_rust = False
1107 elif not match.always():
1112 elif not match.always():
1108 # Matchers have yet to be implemented
1113 # Matchers have yet to be implemented
1109 use_rust = False
1114 use_rust = False
1110
1115
1111 if use_rust:
1116 if use_rust:
1112 # Force Rayon (Rust parallelism library) to respect the number of
1117 # Force Rayon (Rust parallelism library) to respect the number of
1113 # workers. This is a temporary workaround until Rust code knows
1118 # workers. This is a temporary workaround until Rust code knows
1114 # how to read the config file.
1119 # how to read the config file.
1115 numcpus = self._ui.configint(b"worker", b"numcpus")
1120 numcpus = self._ui.configint(b"worker", b"numcpus")
1116 if numcpus is not None:
1121 if numcpus is not None:
1117 encoding.environ.setdefault(
1122 encoding.environ.setdefault(
1118 b'RAYON_NUM_THREADS', b'%d' % numcpus
1123 b'RAYON_NUM_THREADS', b'%d' % numcpus
1119 )
1124 )
1120
1125
1121 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1126 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1122 if not workers_enabled:
1127 if not workers_enabled:
1123 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1128 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1124
1129
1125 (
1130 (
1126 lookup,
1131 lookup,
1127 modified,
1132 modified,
1128 added,
1133 added,
1129 removed,
1134 removed,
1130 deleted,
1135 deleted,
1131 unknown,
1136 unknown,
1132 clean,
1137 clean,
1133 ) = rustmod.status(
1138 ) = rustmod.status(
1134 dmap._rustmap,
1139 dmap._rustmap,
1135 self._rootdir,
1140 self._rootdir,
1136 bool(listclean),
1141 bool(listclean),
1137 self._lastnormaltime,
1142 self._lastnormaltime,
1138 self._checkexec,
1143 self._checkexec,
1139 )
1144 )
1140
1145
1141 status = scmutil.status(
1146 status = scmutil.status(
1142 modified=modified,
1147 modified=modified,
1143 added=added,
1148 added=added,
1144 removed=removed,
1149 removed=removed,
1145 deleted=deleted,
1150 deleted=deleted,
1146 unknown=unknown,
1151 unknown=unknown,
1147 ignored=ignored,
1152 ignored=ignored,
1148 clean=clean,
1153 clean=clean,
1149 )
1154 )
1150 return (lookup, status)
1155 return (lookup, status)
1151
1156
1152 def noop(f):
1157 def noop(f):
1153 pass
1158 pass
1154
1159
1155 dcontains = dmap.__contains__
1160 dcontains = dmap.__contains__
1156 dget = dmap.__getitem__
1161 dget = dmap.__getitem__
1157 ladd = lookup.append # aka "unsure"
1162 ladd = lookup.append # aka "unsure"
1158 madd = modified.append
1163 madd = modified.append
1159 aadd = added.append
1164 aadd = added.append
1160 uadd = unknown.append if listunknown else noop
1165 uadd = unknown.append if listunknown else noop
1161 iadd = ignored.append if listignored else noop
1166 iadd = ignored.append if listignored else noop
1162 radd = removed.append
1167 radd = removed.append
1163 dadd = deleted.append
1168 dadd = deleted.append
1164 cadd = clean.append if listclean else noop
1169 cadd = clean.append if listclean else noop
1165 mexact = match.exact
1170 mexact = match.exact
1166 dirignore = self._dirignore
1171 dirignore = self._dirignore
1167 checkexec = self._checkexec
1172 checkexec = self._checkexec
1168 copymap = self._map.copymap
1173 copymap = self._map.copymap
1169 lastnormaltime = self._lastnormaltime
1174 lastnormaltime = self._lastnormaltime
1170
1175
1171 # We need to do full walks when either
1176 # We need to do full walks when either
1172 # - we're listing all clean files, or
1177 # - we're listing all clean files, or
1173 # - match.traversedir does something, because match.traversedir should
1178 # - match.traversedir does something, because match.traversedir should
1174 # be called for every dir in the working dir
1179 # be called for every dir in the working dir
1175 full = listclean or match.traversedir is not None
1180 full = listclean or match.traversedir is not None
1176 for fn, st in pycompat.iteritems(
1181 for fn, st in pycompat.iteritems(
1177 self.walk(match, subrepos, listunknown, listignored, full=full)
1182 self.walk(match, subrepos, listunknown, listignored, full=full)
1178 ):
1183 ):
1179 if not dcontains(fn):
1184 if not dcontains(fn):
1180 if (listignored or mexact(fn)) and dirignore(fn):
1185 if (listignored or mexact(fn)) and dirignore(fn):
1181 if listignored:
1186 if listignored:
1182 iadd(fn)
1187 iadd(fn)
1183 else:
1188 else:
1184 uadd(fn)
1189 uadd(fn)
1185 continue
1190 continue
1186
1191
1187 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1192 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1188 # written like that for performance reasons. dmap[fn] is not a
1193 # written like that for performance reasons. dmap[fn] is not a
1189 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1194 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1190 # opcode has fast paths when the value to be unpacked is a tuple or
1195 # opcode has fast paths when the value to be unpacked is a tuple or
1191 # a list, but falls back to creating a full-fledged iterator in
1196 # a list, but falls back to creating a full-fledged iterator in
1192 # general. That is much slower than simply accessing and storing the
1197 # general. That is much slower than simply accessing and storing the
1193 # tuple members one by one.
1198 # tuple members one by one.
1194 t = dget(fn)
1199 t = dget(fn)
1195 state = t[0]
1200 state = t[0]
1196 mode = t[1]
1201 mode = t[1]
1197 size = t[2]
1202 size = t[2]
1198 time = t[3]
1203 time = t[3]
1199
1204
1200 if not st and state in b"nma":
1205 if not st and state in b"nma":
1201 dadd(fn)
1206 dadd(fn)
1202 elif state == b'n':
1207 elif state == b'n':
1203 if (
1208 if (
1204 size >= 0
1209 size >= 0
1205 and (
1210 and (
1206 (size != st.st_size and size != st.st_size & _rangemask)
1211 (size != st.st_size and size != st.st_size & _rangemask)
1207 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1212 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1208 )
1213 )
1209 or size == -2 # other parent
1214 or size == -2 # other parent
1210 or fn in copymap
1215 or fn in copymap
1211 ):
1216 ):
1212 madd(fn)
1217 madd(fn)
1213 elif (
1218 elif (
1214 time != st[stat.ST_MTIME]
1219 time != st[stat.ST_MTIME]
1215 and time != st[stat.ST_MTIME] & _rangemask
1220 and time != st[stat.ST_MTIME] & _rangemask
1216 ):
1221 ):
1217 ladd(fn)
1222 ladd(fn)
1218 elif st[stat.ST_MTIME] == lastnormaltime:
1223 elif st[stat.ST_MTIME] == lastnormaltime:
1219 # fn may have just been marked as normal and it may have
1224 # fn may have just been marked as normal and it may have
1220 # changed in the same second without changing its size.
1225 # changed in the same second without changing its size.
1221 # This can happen if we quickly do multiple commits.
1226 # This can happen if we quickly do multiple commits.
1222 # Force lookup, so we don't miss such a racy file change.
1227 # Force lookup, so we don't miss such a racy file change.
1223 ladd(fn)
1228 ladd(fn)
1224 elif listclean:
1229 elif listclean:
1225 cadd(fn)
1230 cadd(fn)
1226 elif state == b'm':
1231 elif state == b'm':
1227 madd(fn)
1232 madd(fn)
1228 elif state == b'a':
1233 elif state == b'a':
1229 aadd(fn)
1234 aadd(fn)
1230 elif state == b'r':
1235 elif state == b'r':
1231 radd(fn)
1236 radd(fn)
1232
1237
1233 return (
1238 return (
1234 lookup,
1239 lookup,
1235 scmutil.status(
1240 scmutil.status(
1236 modified, added, removed, deleted, unknown, ignored, clean
1241 modified, added, removed, deleted, unknown, ignored, clean
1237 ),
1242 ),
1238 )
1243 )
1239
1244
1240 def matches(self, match):
1245 def matches(self, match):
1241 '''
1246 '''
1242 return files in the dirstate (in whatever state) filtered by match
1247 return files in the dirstate (in whatever state) filtered by match
1243 '''
1248 '''
1244 dmap = self._map
1249 dmap = self._map
1245 if match.always():
1250 if match.always():
1246 return dmap.keys()
1251 return dmap.keys()
1247 files = match.files()
1252 files = match.files()
1248 if match.isexact():
1253 if match.isexact():
1249 # fast path -- filter the other way around, since typically files is
1254 # fast path -- filter the other way around, since typically files is
1250 # much smaller than dmap
1255 # much smaller than dmap
1251 return [f for f in files if f in dmap]
1256 return [f for f in files if f in dmap]
1252 if match.prefix() and all(fn in dmap for fn in files):
1257 if match.prefix() and all(fn in dmap for fn in files):
1253 # fast path -- all the values are known to be files, so just return
1258 # fast path -- all the values are known to be files, so just return
1254 # that
1259 # that
1255 return list(files)
1260 return list(files)
1256 return [f for f in dmap if match(f)]
1261 return [f for f in dmap if match(f)]
1257
1262
1258 def _actualfilename(self, tr):
1263 def _actualfilename(self, tr):
1259 if tr:
1264 if tr:
1260 return self._pendingfilename
1265 return self._pendingfilename
1261 else:
1266 else:
1262 return self._filename
1267 return self._filename
1263
1268
1264 def savebackup(self, tr, backupname):
1269 def savebackup(self, tr, backupname):
1265 '''Save current dirstate into backup file'''
1270 '''Save current dirstate into backup file'''
1266 filename = self._actualfilename(tr)
1271 filename = self._actualfilename(tr)
1267 assert backupname != filename
1272 assert backupname != filename
1268
1273
1269 # use '_writedirstate' instead of 'write' to write changes certainly,
1274 # use '_writedirstate' instead of 'write' to write changes certainly,
1270 # because the latter omits writing out if transaction is running.
1275 # because the latter omits writing out if transaction is running.
1271 # output file will be used to create backup of dirstate at this point.
1276 # output file will be used to create backup of dirstate at this point.
1272 if self._dirty or not self._opener.exists(filename):
1277 if self._dirty or not self._opener.exists(filename):
1273 self._writedirstate(
1278 self._writedirstate(
1274 self._opener(filename, b"w", atomictemp=True, checkambig=True)
1279 self._opener(filename, b"w", atomictemp=True, checkambig=True)
1275 )
1280 )
1276
1281
1277 if tr:
1282 if tr:
1278 # ensure that subsequent tr.writepending returns True for
1283 # ensure that subsequent tr.writepending returns True for
1279 # changes written out above, even if dirstate is never
1284 # changes written out above, even if dirstate is never
1280 # changed after this
1285 # changed after this
1281 tr.addfilegenerator(
1286 tr.addfilegenerator(
1282 b'dirstate',
1287 b'dirstate',
1283 (self._filename,),
1288 (self._filename,),
1284 self._writedirstate,
1289 self._writedirstate,
1285 location=b'plain',
1290 location=b'plain',
1286 )
1291 )
1287
1292
1288 # ensure that pending file written above is unlinked at
1293 # ensure that pending file written above is unlinked at
1289 # failure, even if tr.writepending isn't invoked until the
1294 # failure, even if tr.writepending isn't invoked until the
1290 # end of this transaction
1295 # end of this transaction
1291 tr.registertmp(filename, location=b'plain')
1296 tr.registertmp(filename, location=b'plain')
1292
1297
1293 self._opener.tryunlink(backupname)
1298 self._opener.tryunlink(backupname)
1294 # hardlink backup is okay because _writedirstate is always called
1299 # hardlink backup is okay because _writedirstate is always called
1295 # with an "atomictemp=True" file.
1300 # with an "atomictemp=True" file.
1296 util.copyfile(
1301 util.copyfile(
1297 self._opener.join(filename),
1302 self._opener.join(filename),
1298 self._opener.join(backupname),
1303 self._opener.join(backupname),
1299 hardlink=True,
1304 hardlink=True,
1300 )
1305 )
1301
1306
1302 def restorebackup(self, tr, backupname):
1307 def restorebackup(self, tr, backupname):
1303 '''Restore dirstate by backup file'''
1308 '''Restore dirstate by backup file'''
1304 # this "invalidate()" prevents "wlock.release()" from writing
1309 # this "invalidate()" prevents "wlock.release()" from writing
1305 # changes of dirstate out after restoring from backup file
1310 # changes of dirstate out after restoring from backup file
1306 self.invalidate()
1311 self.invalidate()
1307 filename = self._actualfilename(tr)
1312 filename = self._actualfilename(tr)
1308 o = self._opener
1313 o = self._opener
1309 if util.samefile(o.join(backupname), o.join(filename)):
1314 if util.samefile(o.join(backupname), o.join(filename)):
1310 o.unlink(backupname)
1315 o.unlink(backupname)
1311 else:
1316 else:
1312 o.rename(backupname, filename, checkambig=True)
1317 o.rename(backupname, filename, checkambig=True)
1313
1318
1314 def clearbackup(self, tr, backupname):
1319 def clearbackup(self, tr, backupname):
1315 '''Clear backup file'''
1320 '''Clear backup file'''
1316 self._opener.unlink(backupname)
1321 self._opener.unlink(backupname)
1317
1322
1318
1323
1319 class dirstatemap(object):
1324 class dirstatemap(object):
1320 """Map encapsulating the dirstate's contents.
1325 """Map encapsulating the dirstate's contents.
1321
1326
1322 The dirstate contains the following state:
1327 The dirstate contains the following state:
1323
1328
1324 - `identity` is the identity of the dirstate file, which can be used to
1329 - `identity` is the identity of the dirstate file, which can be used to
1325 detect when changes have occurred to the dirstate file.
1330 detect when changes have occurred to the dirstate file.
1326
1331
1327 - `parents` is a pair containing the parents of the working copy. The
1332 - `parents` is a pair containing the parents of the working copy. The
1328 parents are updated by calling `setparents`.
1333 parents are updated by calling `setparents`.
1329
1334
1330 - the state map maps filenames to tuples of (state, mode, size, mtime),
1335 - the state map maps filenames to tuples of (state, mode, size, mtime),
1331 where state is a single character representing 'normal', 'added',
1336 where state is a single character representing 'normal', 'added',
1332 'removed', or 'merged'. It is read by treating the dirstate as a
1337 'removed', or 'merged'. It is read by treating the dirstate as a
1333 dict. File state is updated by calling the `addfile`, `removefile` and
1338 dict. File state is updated by calling the `addfile`, `removefile` and
1334 `dropfile` methods.
1339 `dropfile` methods.
1335
1340
1336 - `copymap` maps destination filenames to their source filename.
1341 - `copymap` maps destination filenames to their source filename.
1337
1342
1338 The dirstate also provides the following views onto the state:
1343 The dirstate also provides the following views onto the state:
1339
1344
1340 - `nonnormalset` is a set of the filenames that have state other
1345 - `nonnormalset` is a set of the filenames that have state other
1341 than 'normal', or are normal but have an mtime of -1 ('normallookup').
1346 than 'normal', or are normal but have an mtime of -1 ('normallookup').
1342
1347
1343 - `otherparentset` is a set of the filenames that are marked as coming
1348 - `otherparentset` is a set of the filenames that are marked as coming
1344 from the second parent when the dirstate is currently being merged.
1349 from the second parent when the dirstate is currently being merged.
1345
1350
1346 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
1351 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
1347 form that they appear as in the dirstate.
1352 form that they appear as in the dirstate.
1348
1353
1349 - `dirfoldmap` is a dict mapping normalized directory names to the
1354 - `dirfoldmap` is a dict mapping normalized directory names to the
1350 denormalized form that they appear as in the dirstate.
1355 denormalized form that they appear as in the dirstate.
1351 """
1356 """
1352
1357
1353 def __init__(self, ui, opener, root):
1358 def __init__(self, ui, opener, root):
1354 self._ui = ui
1359 self._ui = ui
1355 self._opener = opener
1360 self._opener = opener
1356 self._root = root
1361 self._root = root
1357 self._filename = b'dirstate'
1362 self._filename = b'dirstate'
1358
1363
1359 self._parents = None
1364 self._parents = None
1360 self._dirtyparents = False
1365 self._dirtyparents = False
1361
1366
1362 # for consistent view between _pl() and _read() invocations
1367 # for consistent view between _pl() and _read() invocations
1363 self._pendingmode = None
1368 self._pendingmode = None
1364
1369
1365 @propertycache
1370 @propertycache
1366 def _map(self):
1371 def _map(self):
1367 self._map = {}
1372 self._map = {}
1368 self.read()
1373 self.read()
1369 return self._map
1374 return self._map
1370
1375
1371 @propertycache
1376 @propertycache
1372 def copymap(self):
1377 def copymap(self):
1373 self.copymap = {}
1378 self.copymap = {}
1374 self._map
1379 self._map
1375 return self.copymap
1380 return self.copymap
1376
1381
1377 def clear(self):
1382 def clear(self):
1378 self._map.clear()
1383 self._map.clear()
1379 self.copymap.clear()
1384 self.copymap.clear()
1380 self.setparents(nullid, nullid)
1385 self.setparents(nullid, nullid)
1381 util.clearcachedproperty(self, b"_dirs")
1386 util.clearcachedproperty(self, b"_dirs")
1382 util.clearcachedproperty(self, b"_alldirs")
1387 util.clearcachedproperty(self, b"_alldirs")
1383 util.clearcachedproperty(self, b"filefoldmap")
1388 util.clearcachedproperty(self, b"filefoldmap")
1384 util.clearcachedproperty(self, b"dirfoldmap")
1389 util.clearcachedproperty(self, b"dirfoldmap")
1385 util.clearcachedproperty(self, b"nonnormalset")
1390 util.clearcachedproperty(self, b"nonnormalset")
1386 util.clearcachedproperty(self, b"otherparentset")
1391 util.clearcachedproperty(self, b"otherparentset")
1387
1392
1388 def items(self):
1393 def items(self):
1389 return pycompat.iteritems(self._map)
1394 return pycompat.iteritems(self._map)
1390
1395
1391 # forward for python2,3 compat
1396 # forward for python2,3 compat
1392 iteritems = items
1397 iteritems = items
1393
1398
1394 def __len__(self):
1399 def __len__(self):
1395 return len(self._map)
1400 return len(self._map)
1396
1401
1397 def __iter__(self):
1402 def __iter__(self):
1398 return iter(self._map)
1403 return iter(self._map)
1399
1404
1400 def get(self, key, default=None):
1405 def get(self, key, default=None):
1401 return self._map.get(key, default)
1406 return self._map.get(key, default)
1402
1407
1403 def __contains__(self, key):
1408 def __contains__(self, key):
1404 return key in self._map
1409 return key in self._map
1405
1410
1406 def __getitem__(self, key):
1411 def __getitem__(self, key):
1407 return self._map[key]
1412 return self._map[key]
1408
1413
1409 def keys(self):
1414 def keys(self):
1410 return self._map.keys()
1415 return self._map.keys()
1411
1416
1412 def preload(self):
1417 def preload(self):
1413 """Loads the underlying data, if it's not already loaded"""
1418 """Loads the underlying data, if it's not already loaded"""
1414 self._map
1419 self._map
1415
1420
1416 def addfile(self, f, oldstate, state, mode, size, mtime):
1421 def addfile(self, f, oldstate, state, mode, size, mtime):
1417 """Add a tracked file to the dirstate."""
1422 """Add a tracked file to the dirstate."""
1418 if oldstate in b"?r" and "_dirs" in self.__dict__:
1423 if oldstate in b"?r" and "_dirs" in self.__dict__:
1419 self._dirs.addpath(f)
1424 self._dirs.addpath(f)
1420 if oldstate == b"?" and "_alldirs" in self.__dict__:
1425 if oldstate == b"?" and "_alldirs" in self.__dict__:
1421 self._alldirs.addpath(f)
1426 self._alldirs.addpath(f)
1422 self._map[f] = dirstatetuple(state, mode, size, mtime)
1427 self._map[f] = dirstatetuple(state, mode, size, mtime)
1423 if state != b'n' or mtime == -1:
1428 if state != b'n' or mtime == -1:
1424 self.nonnormalset.add(f)
1429 self.nonnormalset.add(f)
1425 if size == -2:
1430 if size == -2:
1426 self.otherparentset.add(f)
1431 self.otherparentset.add(f)
1427
1432
1428 def removefile(self, f, oldstate, size):
1433 def removefile(self, f, oldstate, size):
1429 """
1434 """
1430 Mark a file as removed in the dirstate.
1435 Mark a file as removed in the dirstate.
1431
1436
1432 The `size` parameter is used to store sentinel values that indicate
1437 The `size` parameter is used to store sentinel values that indicate
1433 the file's previous state. In the future, we should refactor this
1438 the file's previous state. In the future, we should refactor this
1434 to be more explicit about what that state is.
1439 to be more explicit about what that state is.
1435 """
1440 """
1436 if oldstate not in b"?r" and "_dirs" in self.__dict__:
1441 if oldstate not in b"?r" and "_dirs" in self.__dict__:
1437 self._dirs.delpath(f)
1442 self._dirs.delpath(f)
1438 if oldstate == b"?" and "_alldirs" in self.__dict__:
1443 if oldstate == b"?" and "_alldirs" in self.__dict__:
1439 self._alldirs.addpath(f)
1444 self._alldirs.addpath(f)
1440 if "filefoldmap" in self.__dict__:
1445 if "filefoldmap" in self.__dict__:
1441 normed = util.normcase(f)
1446 normed = util.normcase(f)
1442 self.filefoldmap.pop(normed, None)
1447 self.filefoldmap.pop(normed, None)
1443 self._map[f] = dirstatetuple(b'r', 0, size, 0)
1448 self._map[f] = dirstatetuple(b'r', 0, size, 0)
1444 self.nonnormalset.add(f)
1449 self.nonnormalset.add(f)
1445
1450
1446 def dropfile(self, f, oldstate):
1451 def dropfile(self, f, oldstate):
1447 """
1452 """
1448 Remove a file from the dirstate. Returns True if the file was
1453 Remove a file from the dirstate. Returns True if the file was
1449 previously recorded.
1454 previously recorded.
1450 """
1455 """
1451 exists = self._map.pop(f, None) is not None
1456 exists = self._map.pop(f, None) is not None
1452 if exists:
1457 if exists:
1453 if oldstate != b"r" and "_dirs" in self.__dict__:
1458 if oldstate != b"r" and "_dirs" in self.__dict__:
1454 self._dirs.delpath(f)
1459 self._dirs.delpath(f)
1455 if "_alldirs" in self.__dict__:
1460 if "_alldirs" in self.__dict__:
1456 self._alldirs.delpath(f)
1461 self._alldirs.delpath(f)
1457 if "filefoldmap" in self.__dict__:
1462 if "filefoldmap" in self.__dict__:
1458 normed = util.normcase(f)
1463 normed = util.normcase(f)
1459 self.filefoldmap.pop(normed, None)
1464 self.filefoldmap.pop(normed, None)
1460 self.nonnormalset.discard(f)
1465 self.nonnormalset.discard(f)
1461 return exists
1466 return exists
1462
1467
1463 def clearambiguoustimes(self, files, now):
1468 def clearambiguoustimes(self, files, now):
1464 for f in files:
1469 for f in files:
1465 e = self.get(f)
1470 e = self.get(f)
1466 if e is not None and e[0] == b'n' and e[3] == now:
1471 if e is not None and e[0] == b'n' and e[3] == now:
1467 self._map[f] = dirstatetuple(e[0], e[1], e[2], -1)
1472 self._map[f] = dirstatetuple(e[0], e[1], e[2], -1)
1468 self.nonnormalset.add(f)
1473 self.nonnormalset.add(f)
1469
1474
1470 def nonnormalentries(self):
1475 def nonnormalentries(self):
1471 '''Compute the nonnormal dirstate entries from the dmap'''
1476 '''Compute the nonnormal dirstate entries from the dmap'''
1472 try:
1477 try:
1473 return parsers.nonnormalotherparententries(self._map)
1478 return parsers.nonnormalotherparententries(self._map)
1474 except AttributeError:
1479 except AttributeError:
1475 nonnorm = set()
1480 nonnorm = set()
1476 otherparent = set()
1481 otherparent = set()
1477 for fname, e in pycompat.iteritems(self._map):
1482 for fname, e in pycompat.iteritems(self._map):
1478 if e[0] != b'n' or e[3] == -1:
1483 if e[0] != b'n' or e[3] == -1:
1479 nonnorm.add(fname)
1484 nonnorm.add(fname)
1480 if e[0] == b'n' and e[2] == -2:
1485 if e[0] == b'n' and e[2] == -2:
1481 otherparent.add(fname)
1486 otherparent.add(fname)
1482 return nonnorm, otherparent
1487 return nonnorm, otherparent
1483
1488
1484 @propertycache
1489 @propertycache
1485 def filefoldmap(self):
1490 def filefoldmap(self):
1486 """Returns a dictionary mapping normalized case paths to their
1491 """Returns a dictionary mapping normalized case paths to their
1487 non-normalized versions.
1492 non-normalized versions.
1488 """
1493 """
1489 try:
1494 try:
1490 makefilefoldmap = parsers.make_file_foldmap
1495 makefilefoldmap = parsers.make_file_foldmap
1491 except AttributeError:
1496 except AttributeError:
1492 pass
1497 pass
1493 else:
1498 else:
1494 return makefilefoldmap(
1499 return makefilefoldmap(
1495 self._map, util.normcasespec, util.normcasefallback
1500 self._map, util.normcasespec, util.normcasefallback
1496 )
1501 )
1497
1502
1498 f = {}
1503 f = {}
1499 normcase = util.normcase
1504 normcase = util.normcase
1500 for name, s in pycompat.iteritems(self._map):
1505 for name, s in pycompat.iteritems(self._map):
1501 if s[0] != b'r':
1506 if s[0] != b'r':
1502 f[normcase(name)] = name
1507 f[normcase(name)] = name
1503 f[b'.'] = b'.' # prevents useless util.fspath() invocation
1508 f[b'.'] = b'.' # prevents useless util.fspath() invocation
1504 return f
1509 return f
1505
1510
1506 def hastrackeddir(self, d):
1511 def hastrackeddir(self, d):
1507 """
1512 """
1508 Returns True if the dirstate contains a tracked (not removed) file
1513 Returns True if the dirstate contains a tracked (not removed) file
1509 in this directory.
1514 in this directory.
1510 """
1515 """
1511 return d in self._dirs
1516 return d in self._dirs
1512
1517
1513 def hasdir(self, d):
1518 def hasdir(self, d):
1514 """
1519 """
1515 Returns True if the dirstate contains a file (tracked or removed)
1520 Returns True if the dirstate contains a file (tracked or removed)
1516 in this directory.
1521 in this directory.
1517 """
1522 """
1518 return d in self._alldirs
1523 return d in self._alldirs
1519
1524
1520 @propertycache
1525 @propertycache
1521 def _dirs(self):
1526 def _dirs(self):
1522 return pathutil.dirs(self._map, b'r')
1527 return pathutil.dirs(self._map, b'r')
1523
1528
1524 @propertycache
1529 @propertycache
1525 def _alldirs(self):
1530 def _alldirs(self):
1526 return pathutil.dirs(self._map)
1531 return pathutil.dirs(self._map)
1527
1532
1528 def _opendirstatefile(self):
1533 def _opendirstatefile(self):
1529 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
1534 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
1530 if self._pendingmode is not None and self._pendingmode != mode:
1535 if self._pendingmode is not None and self._pendingmode != mode:
1531 fp.close()
1536 fp.close()
1532 raise error.Abort(
1537 raise error.Abort(
1533 _(b'working directory state may be changed parallelly')
1538 _(b'working directory state may be changed parallelly')
1534 )
1539 )
1535 self._pendingmode = mode
1540 self._pendingmode = mode
1536 return fp
1541 return fp
1537
1542
1538 def parents(self):
1543 def parents(self):
1539 if not self._parents:
1544 if not self._parents:
1540 try:
1545 try:
1541 fp = self._opendirstatefile()
1546 fp = self._opendirstatefile()
1542 st = fp.read(40)
1547 st = fp.read(40)
1543 fp.close()
1548 fp.close()
1544 except IOError as err:
1549 except IOError as err:
1545 if err.errno != errno.ENOENT:
1550 if err.errno != errno.ENOENT:
1546 raise
1551 raise
1547 # File doesn't exist, so the current state is empty
1552 # File doesn't exist, so the current state is empty
1548 st = b''
1553 st = b''
1549
1554
1550 l = len(st)
1555 l = len(st)
1551 if l == 40:
1556 if l == 40:
1552 self._parents = (st[:20], st[20:40])
1557 self._parents = (st[:20], st[20:40])
1553 elif l == 0:
1558 elif l == 0:
1554 self._parents = (nullid, nullid)
1559 self._parents = (nullid, nullid)
1555 else:
1560 else:
1556 raise error.Abort(
1561 raise error.Abort(
1557 _(b'working directory state appears damaged!')
1562 _(b'working directory state appears damaged!')
1558 )
1563 )
1559
1564
1560 return self._parents
1565 return self._parents
1561
1566
1562 def setparents(self, p1, p2):
1567 def setparents(self, p1, p2):
1563 self._parents = (p1, p2)
1568 self._parents = (p1, p2)
1564 self._dirtyparents = True
1569 self._dirtyparents = True
1565
1570
1566 def read(self):
1571 def read(self):
1567 # ignore HG_PENDING because identity is used only for writing
1572 # ignore HG_PENDING because identity is used only for writing
1568 self.identity = util.filestat.frompath(
1573 self.identity = util.filestat.frompath(
1569 self._opener.join(self._filename)
1574 self._opener.join(self._filename)
1570 )
1575 )
1571
1576
1572 try:
1577 try:
1573 fp = self._opendirstatefile()
1578 fp = self._opendirstatefile()
1574 try:
1579 try:
1575 st = fp.read()
1580 st = fp.read()
1576 finally:
1581 finally:
1577 fp.close()
1582 fp.close()
1578 except IOError as err:
1583 except IOError as err:
1579 if err.errno != errno.ENOENT:
1584 if err.errno != errno.ENOENT:
1580 raise
1585 raise
1581 return
1586 return
1582 if not st:
1587 if not st:
1583 return
1588 return
1584
1589
1585 if util.safehasattr(parsers, b'dict_new_presized'):
1590 if util.safehasattr(parsers, b'dict_new_presized'):
1586 # Make an estimate of the number of files in the dirstate based on
1591 # Make an estimate of the number of files in the dirstate based on
1587 # its size. From a linear regression on a set of real-world repos,
1592 # its size. From a linear regression on a set of real-world repos,
1588 # all over 10,000 files, the size of a dirstate entry is 85
1593 # all over 10,000 files, the size of a dirstate entry is 85
1589 # bytes. The cost of resizing is significantly higher than the cost
1594 # bytes. The cost of resizing is significantly higher than the cost
1590 # of filling in a larger presized dict, so subtract 20% from the
1595 # of filling in a larger presized dict, so subtract 20% from the
1591 # size.
1596 # size.
1592 #
1597 #
1593 # This heuristic is imperfect in many ways, so in a future dirstate
1598 # This heuristic is imperfect in many ways, so in a future dirstate
1594 # format update it makes sense to just record the number of entries
1599 # format update it makes sense to just record the number of entries
1595 # on write.
1600 # on write.
1596 self._map = parsers.dict_new_presized(len(st) // 71)
1601 self._map = parsers.dict_new_presized(len(st) // 71)
1597
1602
1598 # Python's garbage collector triggers a GC each time a certain number
1603 # Python's garbage collector triggers a GC each time a certain number
1599 # of container objects (the number being defined by
1604 # of container objects (the number being defined by
1600 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
1605 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
1601 # for each file in the dirstate. The C version then immediately marks
1606 # for each file in the dirstate. The C version then immediately marks
1602 # them as not to be tracked by the collector. However, this has no
1607 # them as not to be tracked by the collector. However, this has no
1603 # effect on when GCs are triggered, only on what objects the GC looks
1608 # effect on when GCs are triggered, only on what objects the GC looks
1604 # into. This means that O(number of files) GCs are unavoidable.
1609 # into. This means that O(number of files) GCs are unavoidable.
1605 # Depending on when in the process's lifetime the dirstate is parsed,
1610 # Depending on when in the process's lifetime the dirstate is parsed,
1606 # this can get very expensive. As a workaround, disable GC while
1611 # this can get very expensive. As a workaround, disable GC while
1607 # parsing the dirstate.
1612 # parsing the dirstate.
1608 #
1613 #
1609 # (we cannot decorate the function directly since it is in a C module)
1614 # (we cannot decorate the function directly since it is in a C module)
1610 parse_dirstate = util.nogc(parsers.parse_dirstate)
1615 parse_dirstate = util.nogc(parsers.parse_dirstate)
1611 p = parse_dirstate(self._map, self.copymap, st)
1616 p = parse_dirstate(self._map, self.copymap, st)
1612 if not self._dirtyparents:
1617 if not self._dirtyparents:
1613 self.setparents(*p)
1618 self.setparents(*p)
1614
1619
1615 # Avoid excess attribute lookups by fast pathing certain checks
1620 # Avoid excess attribute lookups by fast pathing certain checks
1616 self.__contains__ = self._map.__contains__
1621 self.__contains__ = self._map.__contains__
1617 self.__getitem__ = self._map.__getitem__
1622 self.__getitem__ = self._map.__getitem__
1618 self.get = self._map.get
1623 self.get = self._map.get
1619
1624
1620 def write(self, st, now):
1625 def write(self, st, now):
1621 st.write(
1626 st.write(
1622 parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
1627 parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
1623 )
1628 )
1624 st.close()
1629 st.close()
1625 self._dirtyparents = False
1630 self._dirtyparents = False
1626 self.nonnormalset, self.otherparentset = self.nonnormalentries()
1631 self.nonnormalset, self.otherparentset = self.nonnormalentries()
1627
1632
1628 @propertycache
1633 @propertycache
1629 def nonnormalset(self):
1634 def nonnormalset(self):
1630 nonnorm, otherparents = self.nonnormalentries()
1635 nonnorm, otherparents = self.nonnormalentries()
1631 self.otherparentset = otherparents
1636 self.otherparentset = otherparents
1632 return nonnorm
1637 return nonnorm
1633
1638
1634 @propertycache
1639 @propertycache
1635 def otherparentset(self):
1640 def otherparentset(self):
1636 nonnorm, otherparents = self.nonnormalentries()
1641 nonnorm, otherparents = self.nonnormalentries()
1637 self.nonnormalset = nonnorm
1642 self.nonnormalset = nonnorm
1638 return otherparents
1643 return otherparents
1639
1644
1640 @propertycache
1645 @propertycache
1641 def identity(self):
1646 def identity(self):
1642 self._map
1647 self._map
1643 return self.identity
1648 return self.identity
1644
1649
1645 @propertycache
1650 @propertycache
1646 def dirfoldmap(self):
1651 def dirfoldmap(self):
1647 f = {}
1652 f = {}
1648 normcase = util.normcase
1653 normcase = util.normcase
1649 for name in self._dirs:
1654 for name in self._dirs:
1650 f[normcase(name)] = name
1655 f[normcase(name)] = name
1651 return f
1656 return f
1652
1657
1653
1658
1654 if rustmod is not None:
1659 if rustmod is not None:
1655
1660
1656 class dirstatemap(object):
1661 class dirstatemap(object):
1657 def __init__(self, ui, opener, root):
1662 def __init__(self, ui, opener, root):
1658 self._ui = ui
1663 self._ui = ui
1659 self._opener = opener
1664 self._opener = opener
1660 self._root = root
1665 self._root = root
1661 self._filename = b'dirstate'
1666 self._filename = b'dirstate'
1662 self._parents = None
1667 self._parents = None
1663 self._dirtyparents = False
1668 self._dirtyparents = False
1664
1669
1665 # for consistent view between _pl() and _read() invocations
1670 # for consistent view between _pl() and _read() invocations
1666 self._pendingmode = None
1671 self._pendingmode = None
1667
1672
1668 def addfile(self, *args, **kwargs):
1673 def addfile(self, *args, **kwargs):
1669 return self._rustmap.addfile(*args, **kwargs)
1674 return self._rustmap.addfile(*args, **kwargs)
1670
1675
1671 def removefile(self, *args, **kwargs):
1676 def removefile(self, *args, **kwargs):
1672 return self._rustmap.removefile(*args, **kwargs)
1677 return self._rustmap.removefile(*args, **kwargs)
1673
1678
1674 def dropfile(self, *args, **kwargs):
1679 def dropfile(self, *args, **kwargs):
1675 return self._rustmap.dropfile(*args, **kwargs)
1680 return self._rustmap.dropfile(*args, **kwargs)
1676
1681
1677 def clearambiguoustimes(self, *args, **kwargs):
1682 def clearambiguoustimes(self, *args, **kwargs):
1678 return self._rustmap.clearambiguoustimes(*args, **kwargs)
1683 return self._rustmap.clearambiguoustimes(*args, **kwargs)
1679
1684
1680 def nonnormalentries(self):
1685 def nonnormalentries(self):
1681 return self._rustmap.nonnormalentries()
1686 return self._rustmap.nonnormalentries()
1682
1687
1683 def get(self, *args, **kwargs):
1688 def get(self, *args, **kwargs):
1684 return self._rustmap.get(*args, **kwargs)
1689 return self._rustmap.get(*args, **kwargs)
1685
1690
1686 @propertycache
1691 @propertycache
1687 def _rustmap(self):
1692 def _rustmap(self):
1688 self._rustmap = rustmod.DirstateMap(self._root)
1693 self._rustmap = rustmod.DirstateMap(self._root)
1689 self.read()
1694 self.read()
1690 return self._rustmap
1695 return self._rustmap
1691
1696
1692 @property
1697 @property
1693 def copymap(self):
1698 def copymap(self):
1694 return self._rustmap.copymap()
1699 return self._rustmap.copymap()
1695
1700
1696 def preload(self):
1701 def preload(self):
1697 self._rustmap
1702 self._rustmap
1698
1703
1699 def clear(self):
1704 def clear(self):
1700 self._rustmap.clear()
1705 self._rustmap.clear()
1701 self.setparents(nullid, nullid)
1706 self.setparents(nullid, nullid)
1702 util.clearcachedproperty(self, b"_dirs")
1707 util.clearcachedproperty(self, b"_dirs")
1703 util.clearcachedproperty(self, b"_alldirs")
1708 util.clearcachedproperty(self, b"_alldirs")
1704 util.clearcachedproperty(self, b"dirfoldmap")
1709 util.clearcachedproperty(self, b"dirfoldmap")
1705
1710
1706 def items(self):
1711 def items(self):
1707 return self._rustmap.items()
1712 return self._rustmap.items()
1708
1713
1709 def keys(self):
1714 def keys(self):
1710 return iter(self._rustmap)
1715 return iter(self._rustmap)
1711
1716
1712 def __contains__(self, key):
1717 def __contains__(self, key):
1713 return key in self._rustmap
1718 return key in self._rustmap
1714
1719
1715 def __getitem__(self, item):
1720 def __getitem__(self, item):
1716 return self._rustmap[item]
1721 return self._rustmap[item]
1717
1722
1718 def __len__(self):
1723 def __len__(self):
1719 return len(self._rustmap)
1724 return len(self._rustmap)
1720
1725
1721 def __iter__(self):
1726 def __iter__(self):
1722 return iter(self._rustmap)
1727 return iter(self._rustmap)
1723
1728
1724 # forward for python2,3 compat
1729 # forward for python2,3 compat
1725 iteritems = items
1730 iteritems = items
1726
1731
1727 def _opendirstatefile(self):
1732 def _opendirstatefile(self):
1728 fp, mode = txnutil.trypending(
1733 fp, mode = txnutil.trypending(
1729 self._root, self._opener, self._filename
1734 self._root, self._opener, self._filename
1730 )
1735 )
1731 if self._pendingmode is not None and self._pendingmode != mode:
1736 if self._pendingmode is not None and self._pendingmode != mode:
1732 fp.close()
1737 fp.close()
1733 raise error.Abort(
1738 raise error.Abort(
1734 _(b'working directory state may be changed parallelly')
1739 _(b'working directory state may be changed parallelly')
1735 )
1740 )
1736 self._pendingmode = mode
1741 self._pendingmode = mode
1737 return fp
1742 return fp
1738
1743
1739 def setparents(self, p1, p2):
1744 def setparents(self, p1, p2):
1740 self._rustmap.setparents(p1, p2)
1745 self._rustmap.setparents(p1, p2)
1741 self._parents = (p1, p2)
1746 self._parents = (p1, p2)
1742 self._dirtyparents = True
1747 self._dirtyparents = True
1743
1748
1744 def parents(self):
1749 def parents(self):
1745 if not self._parents:
1750 if not self._parents:
1746 try:
1751 try:
1747 fp = self._opendirstatefile()
1752 fp = self._opendirstatefile()
1748 st = fp.read(40)
1753 st = fp.read(40)
1749 fp.close()
1754 fp.close()
1750 except IOError as err:
1755 except IOError as err:
1751 if err.errno != errno.ENOENT:
1756 if err.errno != errno.ENOENT:
1752 raise
1757 raise
1753 # File doesn't exist, so the current state is empty
1758 # File doesn't exist, so the current state is empty
1754 st = b''
1759 st = b''
1755
1760
1756 try:
1761 try:
1757 self._parents = self._rustmap.parents(st)
1762 self._parents = self._rustmap.parents(st)
1758 except ValueError:
1763 except ValueError:
1759 raise error.Abort(
1764 raise error.Abort(
1760 _(b'working directory state appears damaged!')
1765 _(b'working directory state appears damaged!')
1761 )
1766 )
1762
1767
1763 return self._parents
1768 return self._parents
1764
1769
1765 def read(self):
1770 def read(self):
1766 # ignore HG_PENDING because identity is used only for writing
1771 # ignore HG_PENDING because identity is used only for writing
1767 self.identity = util.filestat.frompath(
1772 self.identity = util.filestat.frompath(
1768 self._opener.join(self._filename)
1773 self._opener.join(self._filename)
1769 )
1774 )
1770
1775
1771 try:
1776 try:
1772 fp = self._opendirstatefile()
1777 fp = self._opendirstatefile()
1773 try:
1778 try:
1774 st = fp.read()
1779 st = fp.read()
1775 finally:
1780 finally:
1776 fp.close()
1781 fp.close()
1777 except IOError as err:
1782 except IOError as err:
1778 if err.errno != errno.ENOENT:
1783 if err.errno != errno.ENOENT:
1779 raise
1784 raise
1780 return
1785 return
1781 if not st:
1786 if not st:
1782 return
1787 return
1783
1788
1784 parse_dirstate = util.nogc(self._rustmap.read)
1789 parse_dirstate = util.nogc(self._rustmap.read)
1785 parents = parse_dirstate(st)
1790 parents = parse_dirstate(st)
1786 if parents and not self._dirtyparents:
1791 if parents and not self._dirtyparents:
1787 self.setparents(*parents)
1792 self.setparents(*parents)
1788
1793
1789 self.__contains__ = self._rustmap.__contains__
1794 self.__contains__ = self._rustmap.__contains__
1790 self.__getitem__ = self._rustmap.__getitem__
1795 self.__getitem__ = self._rustmap.__getitem__
1791 self.get = self._rustmap.get
1796 self.get = self._rustmap.get
1792
1797
1793 def write(self, st, now):
1798 def write(self, st, now):
1794 parents = self.parents()
1799 parents = self.parents()
1795 st.write(self._rustmap.write(parents[0], parents[1], now))
1800 st.write(self._rustmap.write(parents[0], parents[1], now))
1796 st.close()
1801 st.close()
1797 self._dirtyparents = False
1802 self._dirtyparents = False
1798
1803
1799 @propertycache
1804 @propertycache
1800 def filefoldmap(self):
1805 def filefoldmap(self):
1801 """Returns a dictionary mapping normalized case paths to their
1806 """Returns a dictionary mapping normalized case paths to their
1802 non-normalized versions.
1807 non-normalized versions.
1803 """
1808 """
1804 return self._rustmap.filefoldmapasdict()
1809 return self._rustmap.filefoldmapasdict()
1805
1810
1806 def hastrackeddir(self, d):
1811 def hastrackeddir(self, d):
1807 self._dirs # Trigger Python's propertycache
1812 self._dirs # Trigger Python's propertycache
1808 return self._rustmap.hastrackeddir(d)
1813 return self._rustmap.hastrackeddir(d)
1809
1814
1810 def hasdir(self, d):
1815 def hasdir(self, d):
1811 self._dirs # Trigger Python's propertycache
1816 self._dirs # Trigger Python's propertycache
1812 return self._rustmap.hasdir(d)
1817 return self._rustmap.hasdir(d)
1813
1818
1814 @propertycache
1819 @propertycache
1815 def _dirs(self):
1820 def _dirs(self):
1816 return self._rustmap.getdirs()
1821 return self._rustmap.getdirs()
1817
1822
1818 @propertycache
1823 @propertycache
1819 def _alldirs(self):
1824 def _alldirs(self):
1820 return self._rustmap.getalldirs()
1825 return self._rustmap.getalldirs()
1821
1826
1822 @propertycache
1827 @propertycache
1823 def identity(self):
1828 def identity(self):
1824 self._rustmap
1829 self._rustmap
1825 return self.identity
1830 return self.identity
1826
1831
1827 @property
1832 @property
1828 def nonnormalset(self):
1833 def nonnormalset(self):
1829 nonnorm, otherparents = self._rustmap.nonnormalentries()
1834 nonnorm, otherparents = self._rustmap.nonnormalentries()
1830 return nonnorm
1835 return nonnorm
1831
1836
1832 @property
1837 @property
1833 def otherparentset(self):
1838 def otherparentset(self):
1834 nonnorm, otherparents = self._rustmap.nonnormalentries()
1839 nonnorm, otherparents = self._rustmap.nonnormalentries()
1835 return otherparents
1840 return otherparents
1836
1841
1837 @propertycache
1842 @propertycache
1838 def dirfoldmap(self):
1843 def dirfoldmap(self):
1839 f = {}
1844 f = {}
1840 normcase = util.normcase
1845 normcase = util.normcase
1841 for name in self._dirs:
1846 for name in self._dirs:
1842 f[normcase(name)] = name
1847 f[normcase(name)] = name
1843 return f
1848 return f
@@ -1,2710 +1,2708 b''
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import shutil
12 import shutil
13 import stat
13 import stat
14 import struct
14 import struct
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 addednodeid,
18 addednodeid,
19 bin,
19 bin,
20 hex,
20 hex,
21 modifiednodeid,
21 modifiednodeid,
22 nullhex,
22 nullhex,
23 nullid,
23 nullid,
24 nullrev,
24 nullrev,
25 )
25 )
26 from .pycompat import delattr
26 from .pycompat import delattr
27 from .thirdparty import attr
27 from .thirdparty import attr
28 from . import (
28 from . import (
29 copies,
29 copies,
30 encoding,
30 encoding,
31 error,
31 error,
32 filemerge,
32 filemerge,
33 match as matchmod,
33 match as matchmod,
34 obsutil,
34 obsutil,
35 pathutil,
35 pathutil,
36 pycompat,
36 pycompat,
37 scmutil,
37 scmutil,
38 subrepoutil,
38 subrepoutil,
39 util,
39 util,
40 worker,
40 worker,
41 )
41 )
42
42
43 _pack = struct.pack
43 _pack = struct.pack
44 _unpack = struct.unpack
44 _unpack = struct.unpack
45
45
46
46
47 def _droponode(data):
47 def _droponode(data):
48 # used for compatibility for v1
48 # used for compatibility for v1
49 bits = data.split(b'\0')
49 bits = data.split(b'\0')
50 bits = bits[:-2] + bits[-1:]
50 bits = bits[:-2] + bits[-1:]
51 return b'\0'.join(bits)
51 return b'\0'.join(bits)
52
52
53
53
54 # Merge state record types. See ``mergestate`` docs for more.
54 # Merge state record types. See ``mergestate`` docs for more.
55 RECORD_LOCAL = b'L'
55 RECORD_LOCAL = b'L'
56 RECORD_OTHER = b'O'
56 RECORD_OTHER = b'O'
57 RECORD_MERGED = b'F'
57 RECORD_MERGED = b'F'
58 RECORD_CHANGEDELETE_CONFLICT = b'C'
58 RECORD_CHANGEDELETE_CONFLICT = b'C'
59 RECORD_MERGE_DRIVER_MERGE = b'D'
59 RECORD_MERGE_DRIVER_MERGE = b'D'
60 RECORD_PATH_CONFLICT = b'P'
60 RECORD_PATH_CONFLICT = b'P'
61 RECORD_MERGE_DRIVER_STATE = b'm'
61 RECORD_MERGE_DRIVER_STATE = b'm'
62 RECORD_FILE_VALUES = b'f'
62 RECORD_FILE_VALUES = b'f'
63 RECORD_LABELS = b'l'
63 RECORD_LABELS = b'l'
64 RECORD_OVERRIDE = b't'
64 RECORD_OVERRIDE = b't'
65 RECORD_UNSUPPORTED_MANDATORY = b'X'
65 RECORD_UNSUPPORTED_MANDATORY = b'X'
66 RECORD_UNSUPPORTED_ADVISORY = b'x'
66 RECORD_UNSUPPORTED_ADVISORY = b'x'
67
67
68 MERGE_DRIVER_STATE_UNMARKED = b'u'
68 MERGE_DRIVER_STATE_UNMARKED = b'u'
69 MERGE_DRIVER_STATE_MARKED = b'm'
69 MERGE_DRIVER_STATE_MARKED = b'm'
70 MERGE_DRIVER_STATE_SUCCESS = b's'
70 MERGE_DRIVER_STATE_SUCCESS = b's'
71
71
72 MERGE_RECORD_UNRESOLVED = b'u'
72 MERGE_RECORD_UNRESOLVED = b'u'
73 MERGE_RECORD_RESOLVED = b'r'
73 MERGE_RECORD_RESOLVED = b'r'
74 MERGE_RECORD_UNRESOLVED_PATH = b'pu'
74 MERGE_RECORD_UNRESOLVED_PATH = b'pu'
75 MERGE_RECORD_RESOLVED_PATH = b'pr'
75 MERGE_RECORD_RESOLVED_PATH = b'pr'
76 MERGE_RECORD_DRIVER_RESOLVED = b'd'
76 MERGE_RECORD_DRIVER_RESOLVED = b'd'
77
77
78 ACTION_FORGET = b'f'
78 ACTION_FORGET = b'f'
79 ACTION_REMOVE = b'r'
79 ACTION_REMOVE = b'r'
80 ACTION_ADD = b'a'
80 ACTION_ADD = b'a'
81 ACTION_GET = b'g'
81 ACTION_GET = b'g'
82 ACTION_PATH_CONFLICT = b'p'
82 ACTION_PATH_CONFLICT = b'p'
83 ACTION_PATH_CONFLICT_RESOLVE = b'pr'
83 ACTION_PATH_CONFLICT_RESOLVE = b'pr'
84 ACTION_ADD_MODIFIED = b'am'
84 ACTION_ADD_MODIFIED = b'am'
85 ACTION_CREATED = b'c'
85 ACTION_CREATED = b'c'
86 ACTION_DELETED_CHANGED = b'dc'
86 ACTION_DELETED_CHANGED = b'dc'
87 ACTION_CHANGED_DELETED = b'cd'
87 ACTION_CHANGED_DELETED = b'cd'
88 ACTION_MERGE = b'm'
88 ACTION_MERGE = b'm'
89 ACTION_LOCAL_DIR_RENAME_GET = b'dg'
89 ACTION_LOCAL_DIR_RENAME_GET = b'dg'
90 ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
90 ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
91 ACTION_KEEP = b'k'
91 ACTION_KEEP = b'k'
92 ACTION_EXEC = b'e'
92 ACTION_EXEC = b'e'
93 ACTION_CREATED_MERGE = b'cm'
93 ACTION_CREATED_MERGE = b'cm'
94
94
95
95
96 class mergestate(object):
96 class mergestate(object):
97 '''track 3-way merge state of individual files
97 '''track 3-way merge state of individual files
98
98
99 The merge state is stored on disk when needed. Two files are used: one with
99 The merge state is stored on disk when needed. Two files are used: one with
100 an old format (version 1), and one with a new format (version 2). Version 2
100 an old format (version 1), and one with a new format (version 2). Version 2
101 stores a superset of the data in version 1, including new kinds of records
101 stores a superset of the data in version 1, including new kinds of records
102 in the future. For more about the new format, see the documentation for
102 in the future. For more about the new format, see the documentation for
103 `_readrecordsv2`.
103 `_readrecordsv2`.
104
104
105 Each record can contain arbitrary content, and has an associated type. This
105 Each record can contain arbitrary content, and has an associated type. This
106 `type` should be a letter. If `type` is uppercase, the record is mandatory:
106 `type` should be a letter. If `type` is uppercase, the record is mandatory:
107 versions of Mercurial that don't support it should abort. If `type` is
107 versions of Mercurial that don't support it should abort. If `type` is
108 lowercase, the record can be safely ignored.
108 lowercase, the record can be safely ignored.
109
109
110 Currently known records:
110 Currently known records:
111
111
112 L: the node of the "local" part of the merge (hexified version)
112 L: the node of the "local" part of the merge (hexified version)
113 O: the node of the "other" part of the merge (hexified version)
113 O: the node of the "other" part of the merge (hexified version)
114 F: a file to be merged entry
114 F: a file to be merged entry
115 C: a change/delete or delete/change conflict
115 C: a change/delete or delete/change conflict
116 D: a file that the external merge driver will merge internally
116 D: a file that the external merge driver will merge internally
117 (experimental)
117 (experimental)
118 P: a path conflict (file vs directory)
118 P: a path conflict (file vs directory)
119 m: the external merge driver defined for this merge plus its run state
119 m: the external merge driver defined for this merge plus its run state
120 (experimental)
120 (experimental)
121 f: a (filename, dictionary) tuple of optional values for a given file
121 f: a (filename, dictionary) tuple of optional values for a given file
122 X: unsupported mandatory record type (used in tests)
122 X: unsupported mandatory record type (used in tests)
123 x: unsupported advisory record type (used in tests)
123 x: unsupported advisory record type (used in tests)
124 l: the labels for the parts of the merge.
124 l: the labels for the parts of the merge.
125
125
126 Merge driver run states (experimental):
126 Merge driver run states (experimental):
127 u: driver-resolved files unmarked -- needs to be run next time we're about
127 u: driver-resolved files unmarked -- needs to be run next time we're about
128 to resolve or commit
128 to resolve or commit
129 m: driver-resolved files marked -- only needs to be run before commit
129 m: driver-resolved files marked -- only needs to be run before commit
130 s: success/skipped -- does not need to be run any more
130 s: success/skipped -- does not need to be run any more
131
131
132 Merge record states (stored in self._state, indexed by filename):
132 Merge record states (stored in self._state, indexed by filename):
133 u: unresolved conflict
133 u: unresolved conflict
134 r: resolved conflict
134 r: resolved conflict
135 pu: unresolved path conflict (file conflicts with directory)
135 pu: unresolved path conflict (file conflicts with directory)
136 pr: resolved path conflict
136 pr: resolved path conflict
137 d: driver-resolved conflict
137 d: driver-resolved conflict
138
138
139 The resolve command transitions between 'u' and 'r' for conflicts and
139 The resolve command transitions between 'u' and 'r' for conflicts and
140 'pu' and 'pr' for path conflicts.
140 'pu' and 'pr' for path conflicts.
141 '''
141 '''
142
142
143 statepathv1 = b'merge/state'
143 statepathv1 = b'merge/state'
144 statepathv2 = b'merge/state2'
144 statepathv2 = b'merge/state2'
145
145
146 @staticmethod
146 @staticmethod
147 def clean(repo, node=None, other=None, labels=None):
147 def clean(repo, node=None, other=None, labels=None):
148 """Initialize a brand new merge state, removing any existing state on
148 """Initialize a brand new merge state, removing any existing state on
149 disk."""
149 disk."""
150 ms = mergestate(repo)
150 ms = mergestate(repo)
151 ms.reset(node, other, labels)
151 ms.reset(node, other, labels)
152 return ms
152 return ms
153
153
154 @staticmethod
154 @staticmethod
155 def read(repo):
155 def read(repo):
156 """Initialize the merge state, reading it from disk."""
156 """Initialize the merge state, reading it from disk."""
157 ms = mergestate(repo)
157 ms = mergestate(repo)
158 ms._read()
158 ms._read()
159 return ms
159 return ms
160
160
161 def __init__(self, repo):
161 def __init__(self, repo):
162 """Initialize the merge state.
162 """Initialize the merge state.
163
163
164 Do not use this directly! Instead call read() or clean()."""
164 Do not use this directly! Instead call read() or clean()."""
165 self._repo = repo
165 self._repo = repo
166 self._dirty = False
166 self._dirty = False
167 self._labels = None
167 self._labels = None
168
168
169 def reset(self, node=None, other=None, labels=None):
169 def reset(self, node=None, other=None, labels=None):
170 self._state = {}
170 self._state = {}
171 self._stateextras = {}
171 self._stateextras = {}
172 self._local = None
172 self._local = None
173 self._other = None
173 self._other = None
174 self._labels = labels
174 self._labels = labels
175 for var in ('localctx', 'otherctx'):
175 for var in ('localctx', 'otherctx'):
176 if var in vars(self):
176 if var in vars(self):
177 delattr(self, var)
177 delattr(self, var)
178 if node:
178 if node:
179 self._local = node
179 self._local = node
180 self._other = other
180 self._other = other
181 self._readmergedriver = None
181 self._readmergedriver = None
182 if self.mergedriver:
182 if self.mergedriver:
183 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
183 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
184 else:
184 else:
185 self._mdstate = MERGE_DRIVER_STATE_UNMARKED
185 self._mdstate = MERGE_DRIVER_STATE_UNMARKED
186 shutil.rmtree(self._repo.vfs.join(b'merge'), True)
186 shutil.rmtree(self._repo.vfs.join(b'merge'), True)
187 self._results = {}
187 self._results = {}
188 self._dirty = False
188 self._dirty = False
189
189
190 def _read(self):
190 def _read(self):
191 """Analyse each record content to restore a serialized state from disk
191 """Analyse each record content to restore a serialized state from disk
192
192
193 This function process "record" entry produced by the de-serialization
193 This function process "record" entry produced by the de-serialization
194 of on disk file.
194 of on disk file.
195 """
195 """
196 self._state = {}
196 self._state = {}
197 self._stateextras = {}
197 self._stateextras = {}
198 self._local = None
198 self._local = None
199 self._other = None
199 self._other = None
200 for var in ('localctx', 'otherctx'):
200 for var in ('localctx', 'otherctx'):
201 if var in vars(self):
201 if var in vars(self):
202 delattr(self, var)
202 delattr(self, var)
203 self._readmergedriver = None
203 self._readmergedriver = None
204 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
204 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
205 unsupported = set()
205 unsupported = set()
206 records = self._readrecords()
206 records = self._readrecords()
207 for rtype, record in records:
207 for rtype, record in records:
208 if rtype == RECORD_LOCAL:
208 if rtype == RECORD_LOCAL:
209 self._local = bin(record)
209 self._local = bin(record)
210 elif rtype == RECORD_OTHER:
210 elif rtype == RECORD_OTHER:
211 self._other = bin(record)
211 self._other = bin(record)
212 elif rtype == RECORD_MERGE_DRIVER_STATE:
212 elif rtype == RECORD_MERGE_DRIVER_STATE:
213 bits = record.split(b'\0', 1)
213 bits = record.split(b'\0', 1)
214 mdstate = bits[1]
214 mdstate = bits[1]
215 if len(mdstate) != 1 or mdstate not in (
215 if len(mdstate) != 1 or mdstate not in (
216 MERGE_DRIVER_STATE_UNMARKED,
216 MERGE_DRIVER_STATE_UNMARKED,
217 MERGE_DRIVER_STATE_MARKED,
217 MERGE_DRIVER_STATE_MARKED,
218 MERGE_DRIVER_STATE_SUCCESS,
218 MERGE_DRIVER_STATE_SUCCESS,
219 ):
219 ):
220 # the merge driver should be idempotent, so just rerun it
220 # the merge driver should be idempotent, so just rerun it
221 mdstate = MERGE_DRIVER_STATE_UNMARKED
221 mdstate = MERGE_DRIVER_STATE_UNMARKED
222
222
223 self._readmergedriver = bits[0]
223 self._readmergedriver = bits[0]
224 self._mdstate = mdstate
224 self._mdstate = mdstate
225 elif rtype in (
225 elif rtype in (
226 RECORD_MERGED,
226 RECORD_MERGED,
227 RECORD_CHANGEDELETE_CONFLICT,
227 RECORD_CHANGEDELETE_CONFLICT,
228 RECORD_PATH_CONFLICT,
228 RECORD_PATH_CONFLICT,
229 RECORD_MERGE_DRIVER_MERGE,
229 RECORD_MERGE_DRIVER_MERGE,
230 ):
230 ):
231 bits = record.split(b'\0')
231 bits = record.split(b'\0')
232 self._state[bits[0]] = bits[1:]
232 self._state[bits[0]] = bits[1:]
233 elif rtype == RECORD_FILE_VALUES:
233 elif rtype == RECORD_FILE_VALUES:
234 filename, rawextras = record.split(b'\0', 1)
234 filename, rawextras = record.split(b'\0', 1)
235 extraparts = rawextras.split(b'\0')
235 extraparts = rawextras.split(b'\0')
236 extras = {}
236 extras = {}
237 i = 0
237 i = 0
238 while i < len(extraparts):
238 while i < len(extraparts):
239 extras[extraparts[i]] = extraparts[i + 1]
239 extras[extraparts[i]] = extraparts[i + 1]
240 i += 2
240 i += 2
241
241
242 self._stateextras[filename] = extras
242 self._stateextras[filename] = extras
243 elif rtype == RECORD_LABELS:
243 elif rtype == RECORD_LABELS:
244 labels = record.split(b'\0', 2)
244 labels = record.split(b'\0', 2)
245 self._labels = [l for l in labels if len(l) > 0]
245 self._labels = [l for l in labels if len(l) > 0]
246 elif not rtype.islower():
246 elif not rtype.islower():
247 unsupported.add(rtype)
247 unsupported.add(rtype)
248 self._results = {}
248 self._results = {}
249 self._dirty = False
249 self._dirty = False
250
250
251 if unsupported:
251 if unsupported:
252 raise error.UnsupportedMergeRecords(unsupported)
252 raise error.UnsupportedMergeRecords(unsupported)
253
253
254 def _readrecords(self):
254 def _readrecords(self):
255 """Read merge state from disk and return a list of record (TYPE, data)
255 """Read merge state from disk and return a list of record (TYPE, data)
256
256
257 We read data from both v1 and v2 files and decide which one to use.
257 We read data from both v1 and v2 files and decide which one to use.
258
258
259 V1 has been used by version prior to 2.9.1 and contains less data than
259 V1 has been used by version prior to 2.9.1 and contains less data than
260 v2. We read both versions and check if no data in v2 contradicts
260 v2. We read both versions and check if no data in v2 contradicts
261 v1. If there is not contradiction we can safely assume that both v1
261 v1. If there is not contradiction we can safely assume that both v1
262 and v2 were written at the same time and use the extract data in v2. If
262 and v2 were written at the same time and use the extract data in v2. If
263 there is contradiction we ignore v2 content as we assume an old version
263 there is contradiction we ignore v2 content as we assume an old version
264 of Mercurial has overwritten the mergestate file and left an old v2
264 of Mercurial has overwritten the mergestate file and left an old v2
265 file around.
265 file around.
266
266
267 returns list of record [(TYPE, data), ...]"""
267 returns list of record [(TYPE, data), ...]"""
268 v1records = self._readrecordsv1()
268 v1records = self._readrecordsv1()
269 v2records = self._readrecordsv2()
269 v2records = self._readrecordsv2()
270 if self._v1v2match(v1records, v2records):
270 if self._v1v2match(v1records, v2records):
271 return v2records
271 return v2records
272 else:
272 else:
273 # v1 file is newer than v2 file, use it
273 # v1 file is newer than v2 file, use it
274 # we have to infer the "other" changeset of the merge
274 # we have to infer the "other" changeset of the merge
275 # we cannot do better than that with v1 of the format
275 # we cannot do better than that with v1 of the format
276 mctx = self._repo[None].parents()[-1]
276 mctx = self._repo[None].parents()[-1]
277 v1records.append((RECORD_OTHER, mctx.hex()))
277 v1records.append((RECORD_OTHER, mctx.hex()))
278 # add place holder "other" file node information
278 # add place holder "other" file node information
279 # nobody is using it yet so we do no need to fetch the data
279 # nobody is using it yet so we do no need to fetch the data
280 # if mctx was wrong `mctx[bits[-2]]` may fails.
280 # if mctx was wrong `mctx[bits[-2]]` may fails.
281 for idx, r in enumerate(v1records):
281 for idx, r in enumerate(v1records):
282 if r[0] == RECORD_MERGED:
282 if r[0] == RECORD_MERGED:
283 bits = r[1].split(b'\0')
283 bits = r[1].split(b'\0')
284 bits.insert(-2, b'')
284 bits.insert(-2, b'')
285 v1records[idx] = (r[0], b'\0'.join(bits))
285 v1records[idx] = (r[0], b'\0'.join(bits))
286 return v1records
286 return v1records
287
287
def _v1v2match(self, v1records, v2records):
    """Return True when the v1 and v2 on-disk records describe one state.

    The v2 records are first projected down to what the v1 format can
    express (local marker kept as-is, merged records stripped of their
    "other node" field, everything else dropped) before comparing.
    """
    reduced = set()
    for rec in v2records:
        if rec[0] == RECORD_LOCAL:
            reduced.add(rec)
        elif rec[0] == RECORD_MERGED:
            # the "other node" entry never existed in the v1 format
            reduced.add((RECORD_MERGED, _droponode(rec[1])))
    return all(rec in reduced for rec in v1records)
301
301
def _readrecordsv1(self):
    """read on disk merge state for version 1 file

    Returns list of records [(TYPE, data), ...].

    The first line of the file is the local node; every following line
    is a merged-file entry. Note: the "F" data from this file are one
    entry short (no "other file node" entry).

    A missing state file is not an error and yields an empty list.
    """
    records = []
    try:
        f = self._repo.vfs(self.statepathv1)
        try:
            for i, l in enumerate(f):
                if i == 0:
                    records.append((RECORD_LOCAL, l[:-1]))
                else:
                    records.append((RECORD_MERGED, l[:-1]))
        finally:
            # close the handle even if iteration raises, so we never
            # leak the file descriptor
            f.close()
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
    return records
323
323
def _readrecordsv2(self):
    """read on disk merge state for version 2 file

    This format is a list of arbitrary records of the form:

      [type][length][content]

    `type` is a single character, `length` is a 4 byte integer, and
    `content` is an arbitrary byte sequence of length `length`.

    Mercurial versions prior to 3.7 have a bug where if there are
    unsupported mandatory merge records, attempting to clear out the merge
    state with hg update --clean or similar aborts. The 't' record type
    works around that by writing out what those versions treat as an
    advisory record, but later versions interpret as special: the first
    character is the 'real' record type and everything onwards is the data.

    Returns list of records [(TYPE, data), ...]. A missing state file is
    not an error and yields an empty list."""
    records = []
    try:
        f = self._repo.vfs(self.statepathv2)
        try:
            data = f.read()
        finally:
            # always release the handle, even when read() fails
            f.close()
        off = 0
        end = len(data)
        while off < end:
            rtype = data[off : off + 1]
            off += 1
            length = _unpack(b'>I', data[off : (off + 4)])[0]
            off += 4
            record = data[off : (off + length)]
            off += length
            if rtype == RECORD_OVERRIDE:
                # 't' record: first byte is the real type, rest is data
                rtype, record = record[0:1], record[1:]
            records.append((rtype, record))
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
    return records
363
363
@util.propertycache
def mergedriver(self):
    """The configured merge driver, validated against the recorded one.

    Guards against the following scenario:
    - A configures a malicious merge driver in their hgrc, then
      pauses the merge
    - A edits their hgrc to remove references to the merge driver
    - A gives a copy of their entire repo, including .hg, to B
    - B inspects .hgrc and finds it to be clean
    - B then continues the merge and the malicious merge driver
      gets invoked
    """
    configured = self._repo.ui.config(b'experimental', b'mergedriver')
    recorded = self._readmergedriver
    if recorded is not None and recorded != configured:
        raise error.ConfigError(
            _(b"merge driver changed since merge started"),
            hint=_(b"revert merge driver change or abort merge"),
        )
    return configured
387
387
@util.propertycache
def localctx(self):
    """The changectx for the local side of the merge (needs _local set)."""
    if self._local is None:
        msg = b"localctx accessed but self._local isn't set"
        raise error.ProgrammingError(msg)
    return self._repo[self._local]
394
394
@util.propertycache
def otherctx(self):
    """The changectx for the other side of the merge (needs _other set)."""
    if self._other is None:
        msg = b"otherctx accessed but self._other isn't set"
        raise error.ProgrammingError(msg)
    return self._repo[self._other]
401
401
def active(self):
    """Whether mergestate is active.

    Returns True if there appears to be mergestate. This is a rough proxy
    for "is a merge in progress."
    """
    # consult in-memory state first: much cheaper than hitting the
    # filesystem
    if self._local or self._state:
        return True
    vfs = self._repo.vfs
    return vfs.exists(self.statepathv1) or vfs.exists(self.statepathv2)
416
416
def commit(self):
    """Write current state on disk (if necessary)"""
    if not self._dirty:
        return
    self._writerecords(self._makerecords())
    self._dirty = False
423
423
def _makerecords(self):
    """Serialize the in-memory merge state into a list of records.

    Returns [(TYPE, data), ...] ready for ``_writerecords()``."""
    records = [
        (RECORD_LOCAL, hex(self._local)),
        (RECORD_OTHER, hex(self._other)),
    ]
    if self.mergedriver:
        records.append(
            (
                RECORD_MERGE_DRIVER_STATE,
                b'\0'.join([self.mergedriver, self._mdstate]),
            )
        )
    # Write out state items. In all cases, the value of the state map entry
    # is written as the contents of the record. The record type depends on
    # the type of state that is stored, and capital-letter records are used
    # to prevent older versions of Mercurial that do not support the feature
    # from loading them.
    for filename, entry in pycompat.iteritems(self._state):
        payload = b'\0'.join([filename] + entry)
        if entry[0] == MERGE_RECORD_DRIVER_RESOLVED:
            # Driver-resolved merge. These are stored in 'D' records.
            records.append((RECORD_MERGE_DRIVER_MERGE, payload))
        elif entry[0] in (
            MERGE_RECORD_UNRESOLVED_PATH,
            MERGE_RECORD_RESOLVED_PATH,
        ):
            # Path conflicts. These are stored in 'P' records. The current
            # resolution state ('pu' or 'pr') is stored within the record.
            records.append((RECORD_PATH_CONFLICT, payload))
        elif entry[1] == nullhex or entry[6] == nullhex:
            # Change/Delete or Delete/Change conflicts. These are stored in
            # 'C' records. entry[1] is the local file, and is nullhex when
            # the file is deleted locally ('dc'). entry[6] is the remote
            # file, and is nullhex when the file is deleted remotely ('cd').
            records.append((RECORD_CHANGEDELETE_CONFLICT, payload))
        else:
            # Normal files. These are stored in 'F' records.
            records.append((RECORD_MERGED, payload))
    for filename, extras in sorted(pycompat.iteritems(self._stateextras)):
        rawextras = b'\0'.join(
            b'%s\0%s' % (k, v) for k, v in pycompat.iteritems(extras)
        )
        records.append(
            (RECORD_FILE_VALUES, b'%s\0%s' % (filename, rawextras))
        )
    if self._labels is not None:
        records.append((RECORD_LABELS, b'\0'.join(self._labels)))
    return records
477
477
def _writerecords(self, records):
    """Persist the given records in both the v1 and the v2 formats."""
    self._writerecordsv1(records)
    self._writerecordsv2(records)
482
482
def _writerecordsv1(self, records):
    """Write current state on disk in a version 1 file.

    The first record must be the local node; only merged-file ('F')
    records can be expressed in this format, and their "other node"
    field is dropped."""
    f = self._repo.vfs(self.statepathv1, b'wb')
    try:
        irecords = iter(records)
        lrecords = next(irecords)
        assert lrecords[0] == RECORD_LOCAL
        f.write(hex(self._local) + b'\n')
        for rtype, data in irecords:
            if rtype == RECORD_MERGED:
                f.write(b'%s\n' % _droponode(data))
    finally:
        # release the handle even if a write (or the assertion) fails
        f.close()
494
494
def _writerecordsv2(self, records):
    """Write current state on disk in a version 2 file

    See the docstring for _readrecordsv2 for why we use 't'."""
    # these are the records that all version 2 clients can read
    allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
    f = self._repo.vfs(self.statepathv2, b'wb')
    try:
        for key, data in records:
            assert len(key) == 1
            if key not in allowlist:
                # wrap unknown record types in an advisory 't' record so
                # older clients skip them instead of aborting
                key, data = RECORD_OVERRIDE, b'%s%s' % (key, data)
            format = b'>sI%is' % len(data)
            f.write(_pack(format, key, len(data), data))
    finally:
        # release the handle even when packing or writing raises
        f.close()
509
509
@staticmethod
def getlocalkey(path):
    """hash the path of a local file context for storage in the .hg/merge
    directory."""
    digest = hashlib.sha1(path).digest()
    return hex(digest)
516
516
def add(self, fcl, fco, fca, fd):
    """add a new (potentially?) conflicting file the merge state
    fcl: file context for local,
    fco: file context for remote,
    fca: file context for ancestors,
    fd: file path of the resulting merge.

    note: also write the local version to the `.hg/merge` directory.
    """
    if fcl.isabsent():
        localkey = nullhex
    else:
        localkey = mergestate.getlocalkey(fcl.path())
        # stash the pre-merge local content so a resolve can restart it
        self._repo.vfs.write(b'merge/' + localkey, fcl.data())
    self._state[fd] = [
        MERGE_RECORD_UNRESOLVED,
        localkey,
        fcl.path(),
        fca.path(),
        hex(fca.filenode()),
        fco.path(),
        hex(fco.filenode()),
        fcl.flags(),
    ]
    self._stateextras[fd] = {b'ancestorlinknode': hex(fca.node())}
    self._dirty = True
543
543
def addpath(self, path, frename, forigin):
    """add a new conflicting path to the merge state
    path: the path that conflicts
    frename: the filename the conflicting file was renamed to
    forigin: origin of the file ('l' or 'r' for local/remote)
    """
    entry = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
    self._state[path] = entry
    self._dirty = True
552
552
def __contains__(self, dfile):
    """True when `dfile` has an entry in the merge state."""
    return dfile in self._state
555
555
def __getitem__(self, dfile):
    """Return the resolution-state marker recorded for `dfile`."""
    return self._state[dfile][0]
558
558
def __iter__(self):
    """Iterate over the tracked paths in sorted order."""
    return iter(sorted(self._state))
561
561
def files(self):
    """Return a view of every path tracked by the merge state."""
    return self._state.keys()
564
564
def mark(self, dfile, state):
    """Set the resolution state for `dfile` and flag the state dirty."""
    self._state[dfile][0] = state
    self._dirty = True
568
568
def mdstate(self):
    """Return the recorded merge driver state."""
    return self._mdstate
571
571
def unresolved(self):
    """Obtain the paths of unresolved files."""
    pending = (
        MERGE_RECORD_UNRESOLVED,
        MERGE_RECORD_UNRESOLVED_PATH,
    )
    for f, entry in pycompat.iteritems(self._state):
        if entry[0] in pending:
            yield f
581
581
def driverresolved(self):
    """Obtain the paths of driver-resolved files."""
    for path, entry in self._state.items():
        if entry[0] == MERGE_RECORD_DRIVER_RESOLVED:
            yield path
588
588
def extras(self, filename):
    """Return the extras dict for `filename`, creating it if missing."""
    return self._stateextras.setdefault(filename, {})
591
591
def _resolve(self, preresolve, dfile, wctx):
    """rerun merge process for file path `dfile`

    Returns (complete, exit code)."""
    if self[dfile] in (MERGE_RECORD_RESOLVED, MERGE_RECORD_DRIVER_RESOLVED):
        # already resolved; nothing to redo
        return True, 0
    stateentry = self._state[dfile]
    state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
    octx = self._repo[self._other]
    extras = self.extras(dfile)
    anccommitnode = extras.get(b'ancestorlinknode')
    if anccommitnode:
        actx = self._repo[anccommitnode]
    else:
        actx = None
    fcd = self._filectxorabsent(localkey, wctx, dfile)
    fco = self._filectxorabsent(onode, octx, ofile)
    # TODO: move this to filectxorabsent
    fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
    # "premerge" x flags
    flo = fco.flags()
    fla = fca.flags()
    if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
        if fca.node() == nullid and flags != flo:
            if preresolve:
                self._repo.ui.warn(
                    _(
                        b'warning: cannot merge flags for %s '
                        b'without common ancestor - keeping local flags\n'
                    )
                    % afile
                )
        elif flags == fla:
            flags = flo
    if preresolve:
        # restore local
        if localkey != nullhex:
            f = self._repo.vfs(b'merge/' + localkey)
            wctx[dfile].write(f.read(), flags)
            f.close()
        else:
            wctx[dfile].remove(ignoremissing=True)
        complete, r, deleted = filemerge.premerge(
            self._repo,
            wctx,
            self._local,
            lfile,
            fcd,
            fco,
            fca,
            labels=self._labels,
        )
    else:
        complete, r, deleted = filemerge.filemerge(
            self._repo,
            wctx,
            self._local,
            lfile,
            fcd,
            fco,
            fca,
            labels=self._labels,
        )
    if r is None:
        # no real conflict
        del self._state[dfile]
        self._stateextras.pop(dfile, None)
        self._dirty = True
    elif not r:
        self.mark(dfile, MERGE_RECORD_RESOLVED)

    if complete:
        action = None
        if deleted:
            if fcd.isabsent():
                # dc: local picked. Need to drop if present, which may
                # happen on re-resolves.
                action = ACTION_FORGET
            else:
                # cd: remote picked (or otherwise deleted)
                action = ACTION_REMOVE
        else:
            if fcd.isabsent():  # dc: remote picked
                action = ACTION_GET
            elif fco.isabsent():  # cd: local picked
                if dfile in self.localctx:
                    action = ACTION_ADD_MODIFIED
                else:
                    action = ACTION_ADD
            # else: regular merges (no action necessary)
        self._results[dfile] = r, action

    return complete, r
683
683
def _filectxorabsent(self, hexnode, ctx, f):
    """Return ctx[f], or an absent filectx when hexnode is nullhex."""
    if hexnode == nullhex:
        return filemerge.absentfilectx(ctx, f)
    return ctx[f]
689
689
def preresolve(self, dfile, wctx):
    """run premerge process for dfile

    Returns whether the merge is complete, and the exit code."""
    return self._resolve(True, dfile, wctx)
695
695
def resolve(self, dfile, wctx):
    """run merge process (assuming premerge was run) for dfile

    Returns the exit code of the merge."""
    return self._resolve(False, dfile, wctx)[1]
701
701
def counts(self):
    """return counts for updated, merged and removed files in this
    session"""
    updated = merged = removed = 0
    for r, action in pycompat.itervalues(self._results):
        if r is None:
            updated += 1
        elif r == 0:
            # successful resolution: removals are counted apart from
            # content merges
            if action == ACTION_REMOVE:
                removed += 1
            else:
                merged += 1
    return updated, merged, removed
715
715
def unresolvedcount(self):
    """get unresolved count for this merge (persistent)"""
    return sum(1 for _path in self.unresolved())
719
719
def actions(self):
    """return lists of actions to perform on the dirstate"""
    actions = {
        a: []
        for a in (
            ACTION_REMOVE,
            ACTION_FORGET,
            ACTION_ADD,
            ACTION_ADD_MODIFIED,
            ACTION_GET,
        )
    }
    for f, (r, action) in pycompat.iteritems(self._results):
        if action is not None:
            actions[action].append((f, None, b"merge result"))
    return actions
733
733
def recordactions(self):
    """record remove/add/get actions in the dirstate"""
    branchmerge = self._repo.dirstate.p2() != nullid
    recordupdates(self._repo, self.actions(), branchmerge, None)
738
738
def queueremove(self, f):
    """queues a file to be removed from the dirstate

    Meant for use by custom merge drivers."""
    self._results[f] = (0, ACTION_REMOVE)
744
744
def queueadd(self, f):
    """queues a file to be added to the dirstate

    Meant for use by custom merge drivers."""
    self._results[f] = (0, ACTION_ADD)
750
750
def queueget(self, f):
    """queues a file to be marked modified in the dirstate

    Meant for use by custom merge drivers."""
    self._results[f] = (0, ACTION_GET)
756
756
757
757
def _getcheckunknownconfig(repo, section, name):
    """Read `section.name` and validate it is one of abort/ignore/warn.

    Raises ConfigError on any other value; returns the setting otherwise."""
    config = repo.ui.config(section, name)
    valid = [b'abort', b'ignore', b'warn']
    if config in valid:
        return config
    validstr = b', '.join([b"'" + v + b"'" for v in valid])
    raise error.ConfigError(
        _(b"%s.%s not valid ('%s' is none of %s)")
        % (section, name, config, validstr)
    )
768
768
769
769
def _checkunknownfile(repo, wctx, mctx, f, f2=None):
    """Return whether working-directory file `f` is an untracked file
    that differs from `mctx[f2]` (i.e. would be clobbered by an update)."""
    if wctx.isinmemory():
        # Nothing to do in IMM because nothing in the "working copy" can be an
        # unknown file.
        #
        # Note that we should bail out here, not in ``_checkunknownfiles()``,
        # because that function does other useful work.
        return False

    if f2 is None:
        f2 = f
    return (
        repo.wvfs.audit.check(f)
        and repo.wvfs.isfileorlink(f)
        and repo.dirstate.normalize(f) not in repo.dirstate
        and mctx[f2].cmp(wctx[f])
    )
787
787
788
788
class _unknowndirschecker(object):
    """
    Look for any unknown files or directories that may have a path conflict
    with a file. If any path prefix of the file exists as a file or link,
    then it conflicts. If the file itself is a directory that contains any
    file that is not tracked, then it conflicts.

    Returns the shortest path at which a conflict occurs, or None if there is
    no conflict.
    """

    def __init__(self):
        # A set of paths known to be good. This prevents repeated checking of
        # dirs. It will be updated with any new dirs that are checked and found
        # to be safe.
        self._unknowndircache = set()

        # A set of paths that are known to be absent. This prevents repeated
        # checking of subdirectories that are known not to exist. It will be
        # updated with any new dirs that are checked and found to be absent.
        self._missingdircache = set()

    def __call__(self, repo, wctx, f):
        # Returns the conflicting path, or a falsy value (False/None) when
        # there is no conflict. Results are cached across calls via the two
        # sets above, so one instance should be reused for a whole batch of
        # files.
        if wctx.isinmemory():
            # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
            return False

        # Check for path prefixes that exist as unknown files.
        # NOTE(review): assumes reversed(finddirs(f)) visits ancestors from the
        # shallowest prefix down, so caches short-circuit whole subtrees --
        # confirm against pathutil.finddirs ordering.
        for p in reversed(list(pathutil.finddirs(f))):
            if p in self._missingdircache:
                # A shorter prefix is already known absent: no conflict here.
                return
            if p in self._unknowndircache:
                # Already validated this prefix; move on to the next one.
                continue
            if repo.wvfs.audit.check(p):
                if (
                    repo.wvfs.isfileorlink(p)
                    and repo.dirstate.normalize(p) not in repo.dirstate
                ):
                    # An untracked file/link sits where a directory is needed.
                    return p
                if not repo.wvfs.lexists(p):
                    # Prefix missing on disk: remember and stop -- deeper
                    # prefixes cannot exist either.
                    self._missingdircache.add(p)
                    return
                # Prefix exists and is safe; cache it.
                self._unknowndircache.add(p)

        # Check if the file conflicts with a directory containing unknown files.
        if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
            # Does the directory contain any files that are not in the dirstate?
            for p, dirs, files in repo.wvfs.walk(f):
                for fn in files:
                    relf = util.pconvert(repo.wvfs.reljoin(p, fn))
                    relf = repo.dirstate.normalize(relf, isknown=True)
                    if relf not in repo.dirstate:
                        # Untracked file inside the directory: conflict.
                        return f
        return None
843
843
844
844
def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.

    Mutates ``actions`` in place (rewriting some entries to GET/MERGE actions)
    and raises ``error.Abort`` when untracked working-directory files would be
    clobbered and the relevant config says to abort.
    """
    fileconflicts = set()
    pathconflicts = set()
    warnconflicts = set()
    abortconflicts = set()
    unknownconfig = _getcheckunknownconfig(repo, b'merge', b'checkunknown')
    ignoredconfig = _getcheckunknownconfig(repo, b'merge', b'checkignored')
    pathconfig = repo.ui.configbool(
        b'experimental', b'merge.checkpathconflicts'
    )
    if not force:

        def collectconflicts(conflicts, config):
            # Route each conflict set to the abort or warn bucket per config.
            if config == b'abort':
                abortconflicts.update(conflicts)
            elif config == b'warn':
                warnconflicts.update(conflicts)

        checkunknowndirs = _unknowndirschecker()
        for f, (m, args, msg) in pycompat.iteritems(actions):
            if m in (ACTION_CREATED, ACTION_DELETED_CHANGED):
                if _checkunknownfile(repo, wctx, mctx, f):
                    fileconflicts.add(f)
                elif pathconfig and f not in wctx:
                    path = checkunknowndirs(repo, wctx, f)
                    if path is not None:
                        pathconflicts.add(path)
            elif m == ACTION_LOCAL_DIR_RENAME_GET:
                if _checkunknownfile(repo, wctx, mctx, f, args[0]):
                    fileconflicts.add(f)

        allconflicts = fileconflicts | pathconflicts
        ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)}
        unknownconflicts = allconflicts - ignoredconflicts
        collectconflicts(ignoredconflicts, ignoredconfig)
        collectconflicts(unknownconflicts, unknownconfig)
    else:
        for f, (m, args, msg) in pycompat.iteritems(actions):
            if m == ACTION_CREATED_MERGE:
                fl2, anc = args
                different = _checkunknownfile(repo, wctx, mctx, f)
                if repo.dirstate._ignore(f):
                    config = ignoredconfig
                else:
                    config = unknownconfig

                # The behavior when force is True is described by this table:
                #  config  different  mergeforce  |    action    backup
                #    *         n          *       |      get        n
                #    *         y          y       |     merge       -
                #   abort      y          n       |     merge       -   (1)
                #   warn       y          n       |  warn + get     y
                #  ignore      y          n       |      get        y
                #
                # (1) this is probably the wrong behavior here -- we should
                #     probably abort, but some actions like rebases currently
                #     don't like an abort happening in the middle of
                #     merge.update.
                if not different:
                    actions[f] = (ACTION_GET, (fl2, False), b'remote created')
                elif mergeforce or config == b'abort':
                    actions[f] = (
                        ACTION_MERGE,
                        (f, f, None, False, anc),
                        b'remote differs from untracked local',
                    )
                else:
                    # Fix: a former ``elif config == b'abort'`` branch here was
                    # unreachable (the branch above already matches that
                    # config), so it has been removed; only 'warn' and 'ignore'
                    # can reach this point.
                    if config == b'warn':
                        warnconflicts.add(f)
                    actions[f] = (ACTION_GET, (fl2, True), b'remote created')

    # Report conflicts that require aborting, distinguishing file-vs-directory
    # path conflicts from plain content differences.
    for f in sorted(abortconflicts):
        warn = repo.ui.warn
        if f in pathconflicts:
            if repo.wvfs.isfileorlink(f):
                warn(_(b"%s: untracked file conflicts with directory\n") % f)
            else:
                warn(_(b"%s: untracked directory conflicts with file\n") % f)
        else:
            warn(_(b"%s: untracked file differs\n") % f)
    if abortconflicts:
        raise error.Abort(
            _(
                b"untracked files in working directory "
                b"differ from files in requested revision"
            )
        )

    for f in sorted(warnconflicts):
        if repo.wvfs.isfileorlink(f):
            repo.ui.warn(_(b"%s: replacing untracked file\n") % f)
        else:
            repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)

    # Re-tag CREATED actions as GET, marking whether the target needs a
    # backup because it (or one of its parent dirs) conflicted.
    for f, (m, args, msg) in pycompat.iteritems(actions):
        if m == ACTION_CREATED:
            backup = (
                f in fileconflicts
                or f in pathconflicts
                or any(p in pathconflicts for p in pathutil.finddirs(f))
            )
            (flags,) = args
            actions[f] = (ACTION_GET, (flags, backup), msg)
955
955
956
956
def _forgetremoved(wctx, mctx, branchmerge):
    """
    Forget removed files

    If we're jumping between revisions (as opposed to merging), and if
    neither the working directory nor the target rev has the file,
    then we need to remove it from the dirstate, to prevent the
    dirstate from listing the file when it is no longer in the
    manifest.

    If we're merging, and the other revision has removed a file
    that is not present in the working directory, we need to mark it
    as removed.
    """
    # When merging, a file that vanished locally must be recorded as removed;
    # when just updating, it is simply forgotten.
    act = ACTION_REMOVE if branchmerge else ACTION_FORGET
    actions = {
        fname: (act, None, b"forget deleted")
        for fname in wctx.deleted()
        if fname not in mctx
    }

    if not branchmerge:
        for fname in wctx.removed():
            if fname not in mctx:
                actions[fname] = (ACTION_FORGET, None, b"forget removed")

    return actions
986
986
987
987
def _checkcollision(repo, wmf, actions):
    """
    Check for case-folding collisions.
    """
    # NOTE(review): here ``actions`` is the dict-of-lists form keyed by action
    # type (each value is a list of (f, args, msg) tuples), unlike the
    # per-file dict used elsewhere in this module.

    # If the repo is narrowed, filter out files outside the narrowspec.
    narrowmatch = repo.narrowmatch()
    if not narrowmatch.always():
        wmf = wmf.matches(narrowmatch)
        if actions:
            narrowactions = {}
            for m, actionsfortype in pycompat.iteritems(actions):
                narrowactions[m] = []
                for (f, args, msg) in actionsfortype:
                    if narrowmatch(f):
                        narrowactions[m].append((f, args, msg))
            actions = narrowactions

    # build provisional merged manifest up
    pmmf = set(wmf)

    if actions:
        # KEEP and EXEC are no-op
        for m in (
            ACTION_ADD,
            ACTION_ADD_MODIFIED,
            ACTION_FORGET,
            ACTION_GET,
            ACTION_CHANGED_DELETED,
            ACTION_DELETED_CHANGED,
        ):
            # These action types leave a file present in the merged result.
            for f, args, msg in actions[m]:
                pmmf.add(f)
        for f, args, msg in actions[ACTION_REMOVE]:
            pmmf.discard(f)
        for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
            # Rename: source f2 disappears, destination f appears.
            f2, flags = args
            pmmf.discard(f2)
            pmmf.add(f)
        for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
            pmmf.add(f)
        for f, args, msg in actions[ACTION_MERGE]:
            f1, f2, fa, move, anc = args
            if move:
                pmmf.discard(f1)
            pmmf.add(f)

    # check case-folding collision in provisional merged manifest
    foldmap = {}
    for f in pmmf:
        fold = util.normcase(f)
        if fold in foldmap:
            raise error.Abort(
                _(b"case-folding collision between %s and %s")
                % (f, foldmap[fold])
            )
        foldmap[fold] = f

    # check case-folding of directories
    # Walk the folded names in sorted order so each directory prefix is seen
    # immediately before the entries nested under it.
    foldprefix = unfoldprefix = lastfull = b''
    for fold, f in sorted(foldmap.items()):
        if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
            # the folded prefix matches but actual casing is different
            raise error.Abort(
                _(b"case-folding collision between %s and directory of %s")
                % (lastfull, f)
            )
        foldprefix = fold + b'/'
        unfoldprefix = f + b'/'
        lastfull = f
1058
1058
1059
1059
def driverpreprocess(repo, ms, wctx, labels=None):
    """Run the preprocess step of the merge driver, if any.

    This is currently not implemented -- it's an extension point.
    Always reports success so callers proceed with the merge.
    """
    return True
1065
1065
1066
1066
def driverconclude(repo, ms, wctx, labels=None):
    """Run the conclude step of the merge driver, if any.

    This is currently not implemented -- it's an extension point.
    Always reports success so callers finish the merge normally.
    """
    return True
1072
1072
1073
1073
def _filesindirs(repo, manifest, dirs):
    """
    Generator that yields pairs of all the files in the manifest that are found
    inside the directories listed in dirs, and which directory they are found
    in.
    """
    for filename in manifest:
        # Find the first ancestor directory of this file that is in ``dirs``;
        # yield at most one (file, dir) pair per file.
        parent = next(
            (d for d in pathutil.finddirs(filename) if d in dirs), None
        )
        if parent is not None:
            yield filename, parent
1085
1085
1086
1086
def checkpathconflicts(repo, wctx, mctx, actions):
    """
    Check if any actions introduce path conflicts in the repository, updating
    actions to record or handle the path conflict accordingly.
    """
    mf = wctx.manifest()

    # The set of local files that conflict with a remote directory.
    localconflicts = set()

    # The set of directories that conflict with a remote file, and so may cause
    # conflicts if they still contain any files after the merge.
    remoteconflicts = set()

    # The set of directories that appear as both a file and a directory in the
    # remote manifest. These indicate an invalid remote manifest, which
    # can't be updated to cleanly.
    invalidconflicts = set()

    # The set of directories that contain files that are being created.
    createdfiledirs = set()

    # The set of files deleted by all the actions.
    deletedfiles = set()

    # First pass: classify each per-file action into the sets above.
    for f, (m, args, msg) in actions.items():
        if m in (
            ACTION_CREATED,
            ACTION_DELETED_CHANGED,
            ACTION_MERGE,
            ACTION_CREATED_MERGE,
        ):
            # This action may create a new local file.
            createdfiledirs.update(pathutil.finddirs(f))
            if mf.hasdir(f):
                # The file aliases a local directory. This might be ok if all
                # the files in the local directory are being deleted. This
                # will be checked once we know what all the deleted files are.
                remoteconflicts.add(f)
        # Track the names of all deleted files.
        if m == ACTION_REMOVE:
            deletedfiles.add(f)
        if m == ACTION_MERGE:
            f1, f2, fa, move, anc = args
            if move:
                deletedfiles.add(f1)
        if m == ACTION_DIR_RENAME_MOVE_LOCAL:
            f2, flags = args
            deletedfiles.add(f2)

    # Check all directories that contain created files for path conflicts.
    for p in createdfiledirs:
        if p in mf:
            if p in mctx:
                # A file is in a directory which aliases both a local
                # and a remote file. This is an internal inconsistency
                # within the remote manifest.
                invalidconflicts.add(p)
            else:
                # A file is in a directory which aliases a local file.
                # We will need to rename the local file.
                localconflicts.add(p)
        if p in actions and actions[p][0] in (
            ACTION_CREATED,
            ACTION_DELETED_CHANGED,
            ACTION_MERGE,
            ACTION_CREATED_MERGE,
        ):
            # The file is in a directory which aliases a remote file.
            # This is an internal inconsistency within the remote
            # manifest.
            invalidconflicts.add(p)

    # Rename all local conflicting files that have not been deleted.
    for p in localconflicts:
        if p not in deletedfiles:
            ctxname = bytes(wctx).rstrip(b'+')
            pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
            # Move the local file aside, then record the conflict on the
            # original path so resolve can report it.
            actions[pnew] = (
                ACTION_PATH_CONFLICT_RESOLVE,
                (p,),
                b'local path conflict',
            )
            actions[p] = (ACTION_PATH_CONFLICT, (pnew, b'l'), b'path conflict')

    if remoteconflicts:
        # Check if all files in the conflicting directories have been removed.
        ctxname = bytes(mctx).rstrip(b'+')
        for f, p in _filesindirs(repo, mf, remoteconflicts):
            if f not in deletedfiles:
                m, args, msg = actions[p]
                pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
                if m in (ACTION_DELETED_CHANGED, ACTION_MERGE):
                    # Action was merge, just update target.
                    actions[pnew] = (m, args, msg)
                else:
                    # Action was create, change to renamed get action.
                    fl = args[0]
                    actions[pnew] = (
                        ACTION_LOCAL_DIR_RENAME_GET,
                        (p, fl),
                        b'remote path conflict',
                    )
                actions[p] = (
                    ACTION_PATH_CONFLICT,
                    (pnew, ACTION_REMOVE),
                    b'path conflict',
                )
                remoteconflicts.remove(p)
                # One surviving file is enough to establish the conflict for
                # this directory; stop scanning.
                break

    if invalidconflicts:
        for p in invalidconflicts:
            repo.ui.warn(_(b"%s: is both a file and a directory\n") % p)
        raise error.Abort(_(b"destination manifest contains path conflicts"))
1202
1202
1203
1203
1204 def _filternarrowactions(narrowmatch, branchmerge, actions):
1204 def _filternarrowactions(narrowmatch, branchmerge, actions):
1205 """
1205 """
1206 Filters out actions that can ignored because the repo is narrowed.
1206 Filters out actions that can ignored because the repo is narrowed.
1207
1207
1208 Raise an exception if the merge cannot be completed because the repo is
1208 Raise an exception if the merge cannot be completed because the repo is
1209 narrowed.
1209 narrowed.
1210 """
1210 """
1211 nooptypes = {b'k'} # TODO: handle with nonconflicttypes
1211 nooptypes = {b'k'} # TODO: handle with nonconflicttypes
1212 nonconflicttypes = set(b'a am c cm f g r e'.split())
1212 nonconflicttypes = set(b'a am c cm f g r e'.split())
1213 # We mutate the items in the dict during iteration, so iterate
1213 # We mutate the items in the dict during iteration, so iterate
1214 # over a copy.
1214 # over a copy.
1215 for f, action in list(actions.items()):
1215 for f, action in list(actions.items()):
1216 if narrowmatch(f):
1216 if narrowmatch(f):
1217 pass
1217 pass
1218 elif not branchmerge:
1218 elif not branchmerge:
1219 del actions[f] # just updating, ignore changes outside clone
1219 del actions[f] # just updating, ignore changes outside clone
1220 elif action[0] in nooptypes:
1220 elif action[0] in nooptypes:
1221 del actions[f] # merge does not affect file
1221 del actions[f] # merge does not affect file
1222 elif action[0] in nonconflicttypes:
1222 elif action[0] in nonconflicttypes:
1223 raise error.Abort(
1223 raise error.Abort(
1224 _(
1224 _(
1225 b'merge affects file \'%s\' outside narrow, '
1225 b'merge affects file \'%s\' outside narrow, '
1226 b'which is not yet supported'
1226 b'which is not yet supported'
1227 )
1227 )
1228 % f,
1228 % f,
1229 hint=_(b'merging in the other direction may work'),
1229 hint=_(b'merging in the other direction may work'),
1230 )
1230 )
1231 else:
1231 else:
1232 raise error.Abort(
1232 raise error.Abort(
1233 _(b'conflict in file \'%s\' is outside narrow clone') % f
1233 _(b'conflict in file \'%s\' is outside narrow clone') % f
1234 )
1234 )
1235
1235
1236
1236
1237 def manifestmerge(
1237 def manifestmerge(
1238 repo,
1238 repo,
1239 wctx,
1239 wctx,
1240 p2,
1240 p2,
1241 pa,
1241 pa,
1242 branchmerge,
1242 branchmerge,
1243 force,
1243 force,
1244 matcher,
1244 matcher,
1245 acceptremote,
1245 acceptremote,
1246 followcopies,
1246 followcopies,
1247 forcefulldiff=False,
1247 forcefulldiff=False,
1248 ):
1248 ):
1249 """
1249 """
1250 Merge wctx and p2 with ancestor pa and generate merge action list
1250 Merge wctx and p2 with ancestor pa and generate merge action list
1251
1251
1252 branchmerge and force are as passed in to update
1252 branchmerge and force are as passed in to update
1253 matcher = matcher to filter file lists
1253 matcher = matcher to filter file lists
1254 acceptremote = accept the incoming changes without prompting
1254 acceptremote = accept the incoming changes without prompting
1255 """
1255 """
1256 if matcher is not None and matcher.always():
1256 if matcher is not None and matcher.always():
1257 matcher = None
1257 matcher = None
1258
1258
1259 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
1259 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
1260
1260
1261 # manifests fetched in order are going to be faster, so prime the caches
1261 # manifests fetched in order are going to be faster, so prime the caches
1262 [
1262 [
1263 x.manifest()
1263 x.manifest()
1264 for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
1264 for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
1265 ]
1265 ]
1266
1266
1267 if followcopies:
1267 if followcopies:
1268 ret = copies.mergecopies(repo, wctx, p2, pa)
1268 ret = copies.mergecopies(repo, wctx, p2, pa)
1269 copy, movewithdir, diverge, renamedelete, dirmove = ret
1269 copy, movewithdir, diverge, renamedelete, dirmove = ret
1270
1270
1271 boolbm = pycompat.bytestr(bool(branchmerge))
1271 boolbm = pycompat.bytestr(bool(branchmerge))
1272 boolf = pycompat.bytestr(bool(force))
1272 boolf = pycompat.bytestr(bool(force))
1273 boolm = pycompat.bytestr(bool(matcher))
1273 boolm = pycompat.bytestr(bool(matcher))
1274 repo.ui.note(_(b"resolving manifests\n"))
1274 repo.ui.note(_(b"resolving manifests\n"))
1275 repo.ui.debug(
1275 repo.ui.debug(
1276 b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm)
1276 b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm)
1277 )
1277 )
1278 repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
1278 repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
1279
1279
1280 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
1280 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
1281 copied = set(copy.values())
1281 copied = set(copy.values())
1282 copied.update(movewithdir.values())
1282 copied.update(movewithdir.values())
1283
1283
1284 if b'.hgsubstate' in m1 and wctx.rev() is None:
1284 if b'.hgsubstate' in m1 and wctx.rev() is None:
1285 # Check whether sub state is modified, and overwrite the manifest
1285 # Check whether sub state is modified, and overwrite the manifest
1286 # to flag the change. If wctx is a committed revision, we shouldn't
1286 # to flag the change. If wctx is a committed revision, we shouldn't
1287 # care for the dirty state of the working directory.
1287 # care for the dirty state of the working directory.
1288 if any(wctx.sub(s).dirty() for s in wctx.substate):
1288 if any(wctx.sub(s).dirty() for s in wctx.substate):
1289 m1[b'.hgsubstate'] = modifiednodeid
1289 m1[b'.hgsubstate'] = modifiednodeid
1290
1290
1291 # Don't use m2-vs-ma optimization if:
1291 # Don't use m2-vs-ma optimization if:
1292 # - ma is the same as m1 or m2, which we're just going to diff again later
1292 # - ma is the same as m1 or m2, which we're just going to diff again later
1293 # - The caller specifically asks for a full diff, which is useful during bid
1293 # - The caller specifically asks for a full diff, which is useful during bid
1294 # merge.
1294 # merge.
1295 if pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff:
1295 if pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff:
1296 # Identify which files are relevant to the merge, so we can limit the
1296 # Identify which files are relevant to the merge, so we can limit the
1297 # total m1-vs-m2 diff to just those files. This has significant
1297 # total m1-vs-m2 diff to just those files. This has significant
1298 # performance benefits in large repositories.
1298 # performance benefits in large repositories.
1299 relevantfiles = set(ma.diff(m2).keys())
1299 relevantfiles = set(ma.diff(m2).keys())
1300
1300
1301 # For copied and moved files, we need to add the source file too.
1301 # For copied and moved files, we need to add the source file too.
1302 for copykey, copyvalue in pycompat.iteritems(copy):
1302 for copykey, copyvalue in pycompat.iteritems(copy):
1303 if copyvalue in relevantfiles:
1303 if copyvalue in relevantfiles:
1304 relevantfiles.add(copykey)
1304 relevantfiles.add(copykey)
1305 for movedirkey in movewithdir:
1305 for movedirkey in movewithdir:
1306 relevantfiles.add(movedirkey)
1306 relevantfiles.add(movedirkey)
1307 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
1307 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
1308 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
1308 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
1309
1309
1310 diff = m1.diff(m2, match=matcher)
1310 diff = m1.diff(m2, match=matcher)
1311
1311
1312 actions = {}
1312 actions = {}
1313 for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
1313 for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
1314 if n1 and n2: # file exists on both local and remote side
1314 if n1 and n2: # file exists on both local and remote side
1315 if f not in ma:
1315 if f not in ma:
1316 fa = copy.get(f, None)
1316 fa = copy.get(f, None)
1317 if fa is not None:
1317 if fa is not None:
1318 actions[f] = (
1318 actions[f] = (
1319 ACTION_MERGE,
1319 ACTION_MERGE,
1320 (f, f, fa, False, pa.node()),
1320 (f, f, fa, False, pa.node()),
1321 b'both renamed from %s' % fa,
1321 b'both renamed from %s' % fa,
1322 )
1322 )
1323 else:
1323 else:
1324 actions[f] = (
1324 actions[f] = (
1325 ACTION_MERGE,
1325 ACTION_MERGE,
1326 (f, f, None, False, pa.node()),
1326 (f, f, None, False, pa.node()),
1327 b'both created',
1327 b'both created',
1328 )
1328 )
1329 else:
1329 else:
1330 a = ma[f]
1330 a = ma[f]
1331 fla = ma.flags(f)
1331 fla = ma.flags(f)
1332 nol = b'l' not in fl1 + fl2 + fla
1332 nol = b'l' not in fl1 + fl2 + fla
1333 if n2 == a and fl2 == fla:
1333 if n2 == a and fl2 == fla:
1334 actions[f] = (ACTION_KEEP, (), b'remote unchanged')
1334 actions[f] = (ACTION_KEEP, (), b'remote unchanged')
1335 elif n1 == a and fl1 == fla: # local unchanged - use remote
1335 elif n1 == a and fl1 == fla: # local unchanged - use remote
1336 if n1 == n2: # optimization: keep local content
1336 if n1 == n2: # optimization: keep local content
1337 actions[f] = (
1337 actions[f] = (
1338 ACTION_EXEC,
1338 ACTION_EXEC,
1339 (fl2,),
1339 (fl2,),
1340 b'update permissions',
1340 b'update permissions',
1341 )
1341 )
1342 else:
1342 else:
1343 actions[f] = (
1343 actions[f] = (
1344 ACTION_GET,
1344 ACTION_GET,
1345 (fl2, False),
1345 (fl2, False),
1346 b'remote is newer',
1346 b'remote is newer',
1347 )
1347 )
1348 elif nol and n2 == a: # remote only changed 'x'
1348 elif nol and n2 == a: # remote only changed 'x'
1349 actions[f] = (ACTION_EXEC, (fl2,), b'update permissions')
1349 actions[f] = (ACTION_EXEC, (fl2,), b'update permissions')
1350 elif nol and n1 == a: # local only changed 'x'
1350 elif nol and n1 == a: # local only changed 'x'
1351 actions[f] = (ACTION_GET, (fl1, False), b'remote is newer')
1351 actions[f] = (ACTION_GET, (fl1, False), b'remote is newer')
1352 else: # both changed something
1352 else: # both changed something
1353 actions[f] = (
1353 actions[f] = (
1354 ACTION_MERGE,
1354 ACTION_MERGE,
1355 (f, f, f, False, pa.node()),
1355 (f, f, f, False, pa.node()),
1356 b'versions differ',
1356 b'versions differ',
1357 )
1357 )
1358 elif n1: # file exists only on local side
1358 elif n1: # file exists only on local side
1359 if f in copied:
1359 if f in copied:
1360 pass # we'll deal with it on m2 side
1360 pass # we'll deal with it on m2 side
1361 elif f in movewithdir: # directory rename, move local
1361 elif f in movewithdir: # directory rename, move local
1362 f2 = movewithdir[f]
1362 f2 = movewithdir[f]
1363 if f2 in m2:
1363 if f2 in m2:
1364 actions[f2] = (
1364 actions[f2] = (
1365 ACTION_MERGE,
1365 ACTION_MERGE,
1366 (f, f2, None, True, pa.node()),
1366 (f, f2, None, True, pa.node()),
1367 b'remote directory rename, both created',
1367 b'remote directory rename, both created',
1368 )
1368 )
1369 else:
1369 else:
1370 actions[f2] = (
1370 actions[f2] = (
1371 ACTION_DIR_RENAME_MOVE_LOCAL,
1371 ACTION_DIR_RENAME_MOVE_LOCAL,
1372 (f, fl1),
1372 (f, fl1),
1373 b'remote directory rename - move from %s' % f,
1373 b'remote directory rename - move from %s' % f,
1374 )
1374 )
1375 elif f in copy:
1375 elif f in copy:
1376 f2 = copy[f]
1376 f2 = copy[f]
1377 actions[f] = (
1377 actions[f] = (
1378 ACTION_MERGE,
1378 ACTION_MERGE,
1379 (f, f2, f2, False, pa.node()),
1379 (f, f2, f2, False, pa.node()),
1380 b'local copied/moved from %s' % f2,
1380 b'local copied/moved from %s' % f2,
1381 )
1381 )
1382 elif f in ma: # clean, a different, no remote
1382 elif f in ma: # clean, a different, no remote
1383 if n1 != ma[f]:
1383 if n1 != ma[f]:
1384 if acceptremote:
1384 if acceptremote:
1385 actions[f] = (ACTION_REMOVE, None, b'remote delete')
1385 actions[f] = (ACTION_REMOVE, None, b'remote delete')
1386 else:
1386 else:
1387 actions[f] = (
1387 actions[f] = (
1388 ACTION_CHANGED_DELETED,
1388 ACTION_CHANGED_DELETED,
1389 (f, None, f, False, pa.node()),
1389 (f, None, f, False, pa.node()),
1390 b'prompt changed/deleted',
1390 b'prompt changed/deleted',
1391 )
1391 )
1392 elif n1 == addednodeid:
1392 elif n1 == addednodeid:
1393 # This extra 'a' is added by working copy manifest to mark
1393 # This extra 'a' is added by working copy manifest to mark
1394 # the file as locally added. We should forget it instead of
1394 # the file as locally added. We should forget it instead of
1395 # deleting it.
1395 # deleting it.
1396 actions[f] = (ACTION_FORGET, None, b'remote deleted')
1396 actions[f] = (ACTION_FORGET, None, b'remote deleted')
1397 else:
1397 else:
1398 actions[f] = (ACTION_REMOVE, None, b'other deleted')
1398 actions[f] = (ACTION_REMOVE, None, b'other deleted')
1399 elif n2: # file exists only on remote side
1399 elif n2: # file exists only on remote side
1400 if f in copied:
1400 if f in copied:
1401 pass # we'll deal with it on m1 side
1401 pass # we'll deal with it on m1 side
1402 elif f in movewithdir:
1402 elif f in movewithdir:
1403 f2 = movewithdir[f]
1403 f2 = movewithdir[f]
1404 if f2 in m1:
1404 if f2 in m1:
1405 actions[f2] = (
1405 actions[f2] = (
1406 ACTION_MERGE,
1406 ACTION_MERGE,
1407 (f2, f, None, False, pa.node()),
1407 (f2, f, None, False, pa.node()),
1408 b'local directory rename, both created',
1408 b'local directory rename, both created',
1409 )
1409 )
1410 else:
1410 else:
1411 actions[f2] = (
1411 actions[f2] = (
1412 ACTION_LOCAL_DIR_RENAME_GET,
1412 ACTION_LOCAL_DIR_RENAME_GET,
1413 (f, fl2),
1413 (f, fl2),
1414 b'local directory rename - get from %s' % f,
1414 b'local directory rename - get from %s' % f,
1415 )
1415 )
1416 elif f in copy:
1416 elif f in copy:
1417 f2 = copy[f]
1417 f2 = copy[f]
1418 if f2 in m2:
1418 if f2 in m2:
1419 actions[f] = (
1419 actions[f] = (
1420 ACTION_MERGE,
1420 ACTION_MERGE,
1421 (f2, f, f2, False, pa.node()),
1421 (f2, f, f2, False, pa.node()),
1422 b'remote copied from %s' % f2,
1422 b'remote copied from %s' % f2,
1423 )
1423 )
1424 else:
1424 else:
1425 actions[f] = (
1425 actions[f] = (
1426 ACTION_MERGE,
1426 ACTION_MERGE,
1427 (f2, f, f2, True, pa.node()),
1427 (f2, f, f2, True, pa.node()),
1428 b'remote moved from %s' % f2,
1428 b'remote moved from %s' % f2,
1429 )
1429 )
1430 elif f not in ma:
1430 elif f not in ma:
1431 # local unknown, remote created: the logic is described by the
1431 # local unknown, remote created: the logic is described by the
1432 # following table:
1432 # following table:
1433 #
1433 #
1434 # force branchmerge different | action
1434 # force branchmerge different | action
1435 # n * * | create
1435 # n * * | create
1436 # y n * | create
1436 # y n * | create
1437 # y y n | create
1437 # y y n | create
1438 # y y y | merge
1438 # y y y | merge
1439 #
1439 #
1440 # Checking whether the files are different is expensive, so we
1440 # Checking whether the files are different is expensive, so we
1441 # don't do that when we can avoid it.
1441 # don't do that when we can avoid it.
1442 if not force:
1442 if not force:
1443 actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
1443 actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
1444 elif not branchmerge:
1444 elif not branchmerge:
1445 actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
1445 actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
1446 else:
1446 else:
1447 actions[f] = (
1447 actions[f] = (
1448 ACTION_CREATED_MERGE,
1448 ACTION_CREATED_MERGE,
1449 (fl2, pa.node()),
1449 (fl2, pa.node()),
1450 b'remote created, get or merge',
1450 b'remote created, get or merge',
1451 )
1451 )
1452 elif n2 != ma[f]:
1452 elif n2 != ma[f]:
1453 df = None
1453 df = None
1454 for d in dirmove:
1454 for d in dirmove:
1455 if f.startswith(d):
1455 if f.startswith(d):
1456 # new file added in a directory that was moved
1456 # new file added in a directory that was moved
1457 df = dirmove[d] + f[len(d) :]
1457 df = dirmove[d] + f[len(d) :]
1458 break
1458 break
1459 if df is not None and df in m1:
1459 if df is not None and df in m1:
1460 actions[df] = (
1460 actions[df] = (
1461 ACTION_MERGE,
1461 ACTION_MERGE,
1462 (df, f, f, False, pa.node()),
1462 (df, f, f, False, pa.node()),
1463 b'local directory rename - respect move '
1463 b'local directory rename - respect move '
1464 b'from %s' % f,
1464 b'from %s' % f,
1465 )
1465 )
1466 elif acceptremote:
1466 elif acceptremote:
1467 actions[f] = (ACTION_CREATED, (fl2,), b'remote recreating')
1467 actions[f] = (ACTION_CREATED, (fl2,), b'remote recreating')
1468 else:
1468 else:
1469 actions[f] = (
1469 actions[f] = (
1470 ACTION_DELETED_CHANGED,
1470 ACTION_DELETED_CHANGED,
1471 (None, f, f, False, pa.node()),
1471 (None, f, f, False, pa.node()),
1472 b'prompt deleted/changed',
1472 b'prompt deleted/changed',
1473 )
1473 )
1474
1474
1475 if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
1475 if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
1476 # If we are merging, look for path conflicts.
1476 # If we are merging, look for path conflicts.
1477 checkpathconflicts(repo, wctx, p2, actions)
1477 checkpathconflicts(repo, wctx, p2, actions)
1478
1478
1479 narrowmatch = repo.narrowmatch()
1479 narrowmatch = repo.narrowmatch()
1480 if not narrowmatch.always():
1480 if not narrowmatch.always():
1481 # Updates "actions" in place
1481 # Updates "actions" in place
1482 _filternarrowactions(narrowmatch, branchmerge, actions)
1482 _filternarrowactions(narrowmatch, branchmerge, actions)
1483
1483
1484 return actions, diverge, renamedelete
1484 return actions, diverge, renamedelete
1485
1485
1486
1486
def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
    """Resolves false conflicts where the nodeid changed but the content
    remained the same.

    Mutates ``actions`` in place: a changed/deleted entry whose local
    content still matches the ancestor becomes a plain remove, and a
    deleted/changed entry whose remote content matches the ancestor is
    dropped entirely (keeping the local deletion).
    """
    # Snapshot the items up front because we mutate ``actions`` while
    # iterating over it.
    for fname, action in list(actions.items()):
        kind = action[0]
        if fname not in ancestor:
            continue
        if kind == ACTION_CHANGED_DELETED and not wctx[fname].cmp(
            ancestor[fname]
        ):
            # local did change but ended up with same content
            actions[fname] = (ACTION_REMOVE, None, b'prompt same')
        elif kind == ACTION_DELETED_CHANGED and not mctx[fname].cmp(
            ancestor[fname]
        ):
            # remote did change but ended up with same content
            del actions[fname]  # don't get = keep local deleted
1507
1507
1508
1508
def calculateupdates(
    repo,
    wctx,
    mctx,
    ancestors,
    branchmerge,
    force,
    acceptremote,
    followcopies,
    matcher=None,
    mergeforce=False,
):
    """Calculate the actions needed to merge mctx into wctx using ancestors

    With a single ancestor this is a straight ``manifestmerge()`` call.
    With multiple ancestors (merge.preferancestor=*) each ancestor produces
    a set of "bids" per file, and an auction below picks one action per
    file.

    Returns a ``(actions, diverge, renamedelete)`` triple, where ``actions``
    has been filtered through sparse/narrow rules.
    """
    # Avoid cycle.
    from . import sparse

    if len(ancestors) == 1:  # default
        actions, diverge, renamedelete = manifestmerge(
            repo,
            wctx,
            mctx,
            ancestors[0],
            branchmerge,
            force,
            matcher,
            acceptremote,
            followcopies,
        )
        _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

    else:  # only when merge.preferancestor=* - the default
        repo.ui.note(
            _(b"note: merging %s and %s using bids from ancestors %s\n")
            % (
                wctx,
                mctx,
                _(b' and ').join(pycompat.bytestr(anc) for anc in ancestors),
            )
        )

        # Call for bids
        fbids = (
            {}
        )  # mapping filename to bids (action method to list af actions)
        diverge, renamedelete = None, None
        for ancestor in ancestors:
            repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor)
            # forcefulldiff=True disables the relevant-files optimization so
            # every ancestor bids on the same full set of files.
            actions, diverge1, renamedelete1 = manifestmerge(
                repo,
                wctx,
                mctx,
                ancestor,
                branchmerge,
                force,
                matcher,
                acceptremote,
                followcopies,
                forcefulldiff=True,
            )
            _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

            # Track the shortest set of warning on the theory that bid
            # merge will correctly incorporate more information
            if diverge is None or len(diverge1) < len(diverge):
                diverge = diverge1
            if renamedelete is None or len(renamedelete) < len(renamedelete1):
                renamedelete = renamedelete1

            # Group this ancestor's proposals by file, then by action kind.
            for f, a in sorted(pycompat.iteritems(actions)):
                m, args, msg = a
                repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m))
                if f in fbids:
                    d = fbids[f]
                    if m in d:
                        d[m].append(a)
                    else:
                        d[m] = [a]
                else:
                    fbids[f] = {m: [a]}

        # Pick the best bid for each file
        repo.ui.note(_(b'\nauction for merging merge bids\n'))
        actions = {}
        for f, bids in sorted(fbids.items()):
            # bids is a mapping from action method to list af actions
            # Consensus?
            if len(bids) == 1:  # all bids are the same kind of method
                m, l = list(bids.items())[0]
                if all(a == l[0] for a in l[1:]):  # len(bids) is > 1
                    repo.ui.note(_(b" %s: consensus for %s\n") % (f, m))
                    actions[f] = l[0]
                    continue
            # If keep is an option, just do it.
            if ACTION_KEEP in bids:
                repo.ui.note(_(b" %s: picking 'keep' action\n") % f)
                actions[f] = bids[ACTION_KEEP][0]
                continue
            # If there are gets and they all agree [how could they not?], do it.
            if ACTION_GET in bids:
                ga0 = bids[ACTION_GET][0]
                if all(a == ga0 for a in bids[ACTION_GET][1:]):
                    repo.ui.note(_(b" %s: picking 'get' action\n") % f)
                    actions[f] = ga0
                    continue
            # TODO: Consider other simple actions such as mode changes
            # Handle inefficient democrazy.
            repo.ui.note(_(b' %s: multiple bids for merge action:\n') % f)
            for m, l in sorted(bids.items()):
                for _f, args, msg in l:
                    repo.ui.note(b'  %s -> %s\n' % (msg, m))
            # Pick random action. TODO: Instead, prompt user when resolving
            m, l = list(bids.items())[0]
            repo.ui.warn(
                _(b' %s: ambiguous merge - picked %s action\n') % (f, m)
            )
            actions[f] = l[0]
            continue
        repo.ui.note(_(b'end of auction\n\n'))

    if wctx.rev() is None:
        # Working copy merge: forget files that the remote removed.
        fractions = _forgetremoved(wctx, mctx, branchmerge)
        actions.update(fractions)

    prunedactions = sparse.filterupdatesactions(
        repo, wctx, mctx, branchmerge, actions
    )
    # NOTE(review): trivial-conflict resolution runs against ancestors[0]
    # only, and mutates ``actions`` after pruning — presumably the pruned
    # and unpruned dicts share entries; confirm before reordering.
    _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)

    return prunedactions, diverge, renamedelete
1638
1638
1639
1639
def _getcwd():
    """Return the current working directory, or None if it no longer
    exists (e.g. it was removed out from under us)."""
    try:
        cwd = encoding.getcwd()
    except OSError as err:
        # Only swallow "no such file or directory"; anything else is a
        # real error the caller should see.
        if err.errno != errno.ENOENT:
            raise
        return None
    return cwd
1647
1647
1648
1648
def batchremove(repo, wctx, actions):
    """apply removes to the working directory

    yields tuples for progress updates

    ``actions`` is an iterable of ``(file, args, msg)`` remove actions.
    Yields ``(count, file)`` roughly every 100 files so the caller can
    update a progress bar.  Removal failures are warned about, not raised.
    """
    verbose = repo.ui.verbose
    # Remember cwd so we can detect below whether the removals deleted it.
    cwd = _getcwd()
    i = 0
    for f, args, msg in actions:
        repo.ui.debug(b" %s: %s -> r\n" % (f, msg))
        if verbose:
            repo.ui.note(_(b"removing %s\n") % f)
        wctx[f].audit()
        try:
            wctx[f].remove(ignoremissing=True)
        except OSError as inst:
            repo.ui.warn(
                _(b"update failed to remove %s: %s!\n") % (f, inst.strerror)
            )
        # Batch progress updates: flush the counter roughly every 100
        # files (check-before-increment is intentional here).
        if i == 100:
            yield i, f
            i = 0
        i += 1
    if i > 0:
        # Flush the final partial batch (``f`` is the last file seen).
        yield i, f

    if cwd and not _getcwd():
        # cwd was removed in the course of removing files; print a helpful
        # warning.
        repo.ui.warn(
            _(
                b"current directory was removed\n"
                b"(consider changing to repo root: %s)\n"
            )
            % repo.root
        )
1685
1685
1686
1686
def batchget(repo, mctx, wctx, wantfiledata, actions):
    """apply gets to the working directory

    mctx is the context to get from

    Yields arbitrarily many (False, tuple) for progress updates, followed by
    exactly one (True, filedata). When wantfiledata is false, filedata is an
    empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size,
    mtime) of the file f written for each action.
    """
    filedata = {}
    verbose = repo.ui.verbose
    fctx = mctx.filectx
    ui = repo.ui
    i = 0
    with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
        for f, (flags, backup), msg in actions:
            repo.ui.debug(b" %s: %s -> g\n" % (f, msg))
            if verbose:
                repo.ui.note(_(b"getting %s\n") % f)

            if backup:
                # If a file or directory exists with the same name, back that
                # up. Otherwise, look to see if there is a file that conflicts
                # with a directory this file is in, and if so, back that up.
                conflicting = f
                if not repo.wvfs.lexists(f):
                    # walk parent directories nearest-first, looking for a
                    # file/symlink occupying a directory component of f
                    for p in pathutil.finddirs(f):
                        if repo.wvfs.isfileorlink(p):
                            conflicting = p
                            break
                if repo.wvfs.lexists(conflicting):
                    orig = scmutil.backuppath(ui, repo, conflicting)
                    util.rename(repo.wjoin(conflicting), orig)
            wfctx = wctx[f]
            # drop any cached "unknown file" state before overwriting
            wfctx.clearunknown()
            atomictemp = ui.configbool(b"experimental", b"update.atomic-file")
            size = wfctx.write(
                fctx(f).data(),
                flags,
                backgroundclose=True,
                atomictemp=atomictemp,
            )
            if wantfiledata:
                # capture the post-write stat so the caller can feed
                # dirstate.normal without re-statting
                s = wfctx.lstat()
                mode = s.st_mode
                mtime = s[stat.ST_MTIME]
                filedata[f] = (mode, size, mtime)  # for dirstate.normal
            # Batch progress updates roughly every 100 files.
            if i == 100:
                yield False, (i, f)
                i = 0
            i += 1
    if i > 0:
        yield False, (i, f)
    yield True, filedata
1742
1742
1743
1743
def _prefetchfiles(repo, ctx, actions):
    """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
    of merge actions. ``ctx`` is the context being merged in."""

    # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
    # don't touch the context to be merged in. 'cd' is skipped, because
    # changed/deleted never resolves to something from the remote side.
    relevantops = (
        ACTION_GET,
        ACTION_DELETED_CHANGED,
        ACTION_LOCAL_DIR_RENAME_GET,
        ACTION_MERGE,
    )
    # Flatten the per-action lists into one list of filenames to fetch.
    files = []
    for op in relevantops:
        for f, args, msg in actions[op]:
            files.append(f)
    scmutil.prefetchfiles(
        repo, [ctx.rev()], scmutil.matchfiles(repo, files)
    )
1767
1767
1768
1768
@attr.s(frozen=True)
class updateresult(object):
    """Immutable bundle of counters describing an update's outcome."""

    updatedcount = attr.ib()
    mergedcount = attr.ib()
    removedcount = attr.ib()
    unresolvedcount = attr.ib()

    def isempty(self):
        """Return True when no files were touched and nothing is unresolved."""
        counts = (
            self.updatedcount,
            self.mergedcount,
            self.removedcount,
            self.unresolvedcount,
        )
        return not any(counts)
1783
1783
1784
1784
def emptyactions():
    """create an actions dict, to be populated and passed to applyupdates()"""
    # One empty list per known action kind, so callers can append without
    # checking for key existence.
    actions = {}
    for m in (
        ACTION_ADD,
        ACTION_ADD_MODIFIED,
        ACTION_FORGET,
        ACTION_GET,
        ACTION_CHANGED_DELETED,
        ACTION_DELETED_CHANGED,
        ACTION_REMOVE,
        ACTION_DIR_RENAME_MOVE_LOCAL,
        ACTION_LOCAL_DIR_RENAME_GET,
        ACTION_MERGE,
        ACTION_EXEC,
        ACTION_KEEP,
        ACTION_PATH_CONFLICT,
        ACTION_PATH_CONFLICT_RESOLVE,
    ):
        actions[m] = []
    return actions
1806
1806
1807
1807
1808 def applyupdates(
1808 def applyupdates(
1809 repo, actions, wctx, mctx, overwrite, wantfiledata, labels=None
1809 repo, actions, wctx, mctx, overwrite, wantfiledata, labels=None
1810 ):
1810 ):
1811 """apply the merge action list to the working directory
1811 """apply the merge action list to the working directory
1812
1812
1813 wctx is the working copy context
1813 wctx is the working copy context
1814 mctx is the context to be merged into the working copy
1814 mctx is the context to be merged into the working copy
1815
1815
1816 Return a tuple of (counts, filedata), where counts is a tuple
1816 Return a tuple of (counts, filedata), where counts is a tuple
1817 (updated, merged, removed, unresolved) that describes how many
1817 (updated, merged, removed, unresolved) that describes how many
1818 files were affected by the update, and filedata is as described in
1818 files were affected by the update, and filedata is as described in
1819 batchget.
1819 batchget.
1820 """
1820 """
1821
1821
1822 _prefetchfiles(repo, mctx, actions)
1822 _prefetchfiles(repo, mctx, actions)
1823
1823
1824 updated, merged, removed = 0, 0, 0
1824 updated, merged, removed = 0, 0, 0
1825 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1825 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1826 moves = []
1826 moves = []
1827 for m, l in actions.items():
1827 for m, l in actions.items():
1828 l.sort()
1828 l.sort()
1829
1829
1830 # 'cd' and 'dc' actions are treated like other merge conflicts
1830 # 'cd' and 'dc' actions are treated like other merge conflicts
1831 mergeactions = sorted(actions[ACTION_CHANGED_DELETED])
1831 mergeactions = sorted(actions[ACTION_CHANGED_DELETED])
1832 mergeactions.extend(sorted(actions[ACTION_DELETED_CHANGED]))
1832 mergeactions.extend(sorted(actions[ACTION_DELETED_CHANGED]))
1833 mergeactions.extend(actions[ACTION_MERGE])
1833 mergeactions.extend(actions[ACTION_MERGE])
1834 for f, args, msg in mergeactions:
1834 for f, args, msg in mergeactions:
1835 f1, f2, fa, move, anc = args
1835 f1, f2, fa, move, anc = args
1836 if f == b'.hgsubstate': # merged internally
1836 if f == b'.hgsubstate': # merged internally
1837 continue
1837 continue
1838 if f1 is None:
1838 if f1 is None:
1839 fcl = filemerge.absentfilectx(wctx, fa)
1839 fcl = filemerge.absentfilectx(wctx, fa)
1840 else:
1840 else:
1841 repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f))
1841 repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f))
1842 fcl = wctx[f1]
1842 fcl = wctx[f1]
1843 if f2 is None:
1843 if f2 is None:
1844 fco = filemerge.absentfilectx(mctx, fa)
1844 fco = filemerge.absentfilectx(mctx, fa)
1845 else:
1845 else:
1846 fco = mctx[f2]
1846 fco = mctx[f2]
1847 actx = repo[anc]
1847 actx = repo[anc]
1848 if fa in actx:
1848 if fa in actx:
1849 fca = actx[fa]
1849 fca = actx[fa]
1850 else:
1850 else:
1851 # TODO: move to absentfilectx
1851 # TODO: move to absentfilectx
1852 fca = repo.filectx(f1, fileid=nullrev)
1852 fca = repo.filectx(f1, fileid=nullrev)
1853 ms.add(fcl, fco, fca, f)
1853 ms.add(fcl, fco, fca, f)
1854 if f1 != f and move:
1854 if f1 != f and move:
1855 moves.append(f1)
1855 moves.append(f1)
1856
1856
1857 # remove renamed files after safely stored
1857 # remove renamed files after safely stored
1858 for f in moves:
1858 for f in moves:
1859 if wctx[f].lexists():
1859 if wctx[f].lexists():
1860 repo.ui.debug(b"removing %s\n" % f)
1860 repo.ui.debug(b"removing %s\n" % f)
1861 wctx[f].audit()
1861 wctx[f].audit()
1862 wctx[f].remove()
1862 wctx[f].remove()
1863
1863
1864 numupdates = sum(len(l) for m, l in actions.items() if m != ACTION_KEEP)
1864 numupdates = sum(len(l) for m, l in actions.items() if m != ACTION_KEEP)
1865 progress = repo.ui.makeprogress(
1865 progress = repo.ui.makeprogress(
1866 _(b'updating'), unit=_(b'files'), total=numupdates
1866 _(b'updating'), unit=_(b'files'), total=numupdates
1867 )
1867 )
1868
1868
1869 if [a for a in actions[ACTION_REMOVE] if a[0] == b'.hgsubstate']:
1869 if [a for a in actions[ACTION_REMOVE] if a[0] == b'.hgsubstate']:
1870 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1870 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1871
1871
1872 # record path conflicts
1872 # record path conflicts
1873 for f, args, msg in actions[ACTION_PATH_CONFLICT]:
1873 for f, args, msg in actions[ACTION_PATH_CONFLICT]:
1874 f1, fo = args
1874 f1, fo = args
1875 s = repo.ui.status
1875 s = repo.ui.status
1876 s(
1876 s(
1877 _(
1877 _(
1878 b"%s: path conflict - a file or link has the same name as a "
1878 b"%s: path conflict - a file or link has the same name as a "
1879 b"directory\n"
1879 b"directory\n"
1880 )
1880 )
1881 % f
1881 % f
1882 )
1882 )
1883 if fo == b'l':
1883 if fo == b'l':
1884 s(_(b"the local file has been renamed to %s\n") % f1)
1884 s(_(b"the local file has been renamed to %s\n") % f1)
1885 else:
1885 else:
1886 s(_(b"the remote file has been renamed to %s\n") % f1)
1886 s(_(b"the remote file has been renamed to %s\n") % f1)
1887 s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
1887 s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
1888 ms.addpath(f, f1, fo)
1888 ms.addpath(f, f1, fo)
1889 progress.increment(item=f)
1889 progress.increment(item=f)
1890
1890
1891 # When merging in-memory, we can't support worker processes, so set the
1891 # When merging in-memory, we can't support worker processes, so set the
1892 # per-item cost at 0 in that case.
1892 # per-item cost at 0 in that case.
1893 cost = 0 if wctx.isinmemory() else 0.001
1893 cost = 0 if wctx.isinmemory() else 0.001
1894
1894
1895 # remove in parallel (must come before resolving path conflicts and getting)
1895 # remove in parallel (must come before resolving path conflicts and getting)
1896 prog = worker.worker(
1896 prog = worker.worker(
1897 repo.ui, cost, batchremove, (repo, wctx), actions[ACTION_REMOVE]
1897 repo.ui, cost, batchremove, (repo, wctx), actions[ACTION_REMOVE]
1898 )
1898 )
1899 for i, item in prog:
1899 for i, item in prog:
1900 progress.increment(step=i, item=item)
1900 progress.increment(step=i, item=item)
1901 removed = len(actions[ACTION_REMOVE])
1901 removed = len(actions[ACTION_REMOVE])
1902
1902
1903 # resolve path conflicts (must come before getting)
1903 # resolve path conflicts (must come before getting)
1904 for f, args, msg in actions[ACTION_PATH_CONFLICT_RESOLVE]:
1904 for f, args, msg in actions[ACTION_PATH_CONFLICT_RESOLVE]:
1905 repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
1905 repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
1906 (f0,) = args
1906 (f0,) = args
1907 if wctx[f0].lexists():
1907 if wctx[f0].lexists():
1908 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1908 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1909 wctx[f].audit()
1909 wctx[f].audit()
1910 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1910 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1911 wctx[f0].remove()
1911 wctx[f0].remove()
1912 progress.increment(item=f)
1912 progress.increment(item=f)
1913
1913
1914 # get in parallel.
1914 # get in parallel.
1915 threadsafe = repo.ui.configbool(
1915 threadsafe = repo.ui.configbool(
1916 b'experimental', b'worker.wdir-get-thread-safe'
1916 b'experimental', b'worker.wdir-get-thread-safe'
1917 )
1917 )
1918 prog = worker.worker(
1918 prog = worker.worker(
1919 repo.ui,
1919 repo.ui,
1920 cost,
1920 cost,
1921 batchget,
1921 batchget,
1922 (repo, mctx, wctx, wantfiledata),
1922 (repo, mctx, wctx, wantfiledata),
1923 actions[ACTION_GET],
1923 actions[ACTION_GET],
1924 threadsafe=threadsafe,
1924 threadsafe=threadsafe,
1925 hasretval=True,
1925 hasretval=True,
1926 )
1926 )
1927 getfiledata = {}
1927 getfiledata = {}
1928 for final, res in prog:
1928 for final, res in prog:
1929 if final:
1929 if final:
1930 getfiledata = res
1930 getfiledata = res
1931 else:
1931 else:
1932 i, item = res
1932 i, item = res
1933 progress.increment(step=i, item=item)
1933 progress.increment(step=i, item=item)
1934 updated = len(actions[ACTION_GET])
1934 updated = len(actions[ACTION_GET])
1935
1935
1936 if [a for a in actions[ACTION_GET] if a[0] == b'.hgsubstate']:
1936 if [a for a in actions[ACTION_GET] if a[0] == b'.hgsubstate']:
1937 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1937 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1938
1938
1939 # forget (manifest only, just log it) (must come first)
1939 # forget (manifest only, just log it) (must come first)
1940 for f, args, msg in actions[ACTION_FORGET]:
1940 for f, args, msg in actions[ACTION_FORGET]:
1941 repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
1941 repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
1942 progress.increment(item=f)
1942 progress.increment(item=f)
1943
1943
1944 # re-add (manifest only, just log it)
1944 # re-add (manifest only, just log it)
1945 for f, args, msg in actions[ACTION_ADD]:
1945 for f, args, msg in actions[ACTION_ADD]:
1946 repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
1946 repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
1947 progress.increment(item=f)
1947 progress.increment(item=f)
1948
1948
1949 # re-add/mark as modified (manifest only, just log it)
1949 # re-add/mark as modified (manifest only, just log it)
1950 for f, args, msg in actions[ACTION_ADD_MODIFIED]:
1950 for f, args, msg in actions[ACTION_ADD_MODIFIED]:
1951 repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
1951 repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
1952 progress.increment(item=f)
1952 progress.increment(item=f)
1953
1953
1954 # keep (noop, just log it)
1954 # keep (noop, just log it)
1955 for f, args, msg in actions[ACTION_KEEP]:
1955 for f, args, msg in actions[ACTION_KEEP]:
1956 repo.ui.debug(b" %s: %s -> k\n" % (f, msg))
1956 repo.ui.debug(b" %s: %s -> k\n" % (f, msg))
1957 # no progress
1957 # no progress
1958
1958
1959 # directory rename, move local
1959 # directory rename, move local
1960 for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
1960 for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
1961 repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
1961 repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
1962 progress.increment(item=f)
1962 progress.increment(item=f)
1963 f0, flags = args
1963 f0, flags = args
1964 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1964 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1965 wctx[f].audit()
1965 wctx[f].audit()
1966 wctx[f].write(wctx.filectx(f0).data(), flags)
1966 wctx[f].write(wctx.filectx(f0).data(), flags)
1967 wctx[f0].remove()
1967 wctx[f0].remove()
1968 updated += 1
1968 updated += 1
1969
1969
1970 # local directory rename, get
1970 # local directory rename, get
1971 for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
1971 for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
1972 repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
1972 repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
1973 progress.increment(item=f)
1973 progress.increment(item=f)
1974 f0, flags = args
1974 f0, flags = args
1975 repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
1975 repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
1976 wctx[f].write(mctx.filectx(f0).data(), flags)
1976 wctx[f].write(mctx.filectx(f0).data(), flags)
1977 updated += 1
1977 updated += 1
1978
1978
1979 # exec
1979 # exec
1980 for f, args, msg in actions[ACTION_EXEC]:
1980 for f, args, msg in actions[ACTION_EXEC]:
1981 repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
1981 repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
1982 progress.increment(item=f)
1982 progress.increment(item=f)
1983 (flags,) = args
1983 (flags,) = args
1984 wctx[f].audit()
1984 wctx[f].audit()
1985 wctx[f].setflags(b'l' in flags, b'x' in flags)
1985 wctx[f].setflags(b'l' in flags, b'x' in flags)
1986 updated += 1
1986 updated += 1
1987
1987
1988 # the ordering is important here -- ms.mergedriver will raise if the merge
1988 # the ordering is important here -- ms.mergedriver will raise if the merge
1989 # driver has changed, and we want to be able to bypass it when overwrite is
1989 # driver has changed, and we want to be able to bypass it when overwrite is
1990 # True
1990 # True
1991 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1991 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1992
1992
1993 if usemergedriver:
1993 if usemergedriver:
1994 if wctx.isinmemory():
1994 if wctx.isinmemory():
1995 raise error.InMemoryMergeConflictsError(
1995 raise error.InMemoryMergeConflictsError(
1996 b"in-memory merge does not support mergedriver"
1996 b"in-memory merge does not support mergedriver"
1997 )
1997 )
1998 ms.commit()
1998 ms.commit()
1999 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1999 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
2000 # the driver might leave some files unresolved
2000 # the driver might leave some files unresolved
2001 unresolvedf = set(ms.unresolved())
2001 unresolvedf = set(ms.unresolved())
2002 if not proceed:
2002 if not proceed:
2003 # XXX setting unresolved to at least 1 is a hack to make sure we
2003 # XXX setting unresolved to at least 1 is a hack to make sure we
2004 # error out
2004 # error out
2005 return updateresult(
2005 return updateresult(
2006 updated, merged, removed, max(len(unresolvedf), 1)
2006 updated, merged, removed, max(len(unresolvedf), 1)
2007 )
2007 )
2008 newactions = []
2008 newactions = []
2009 for f, args, msg in mergeactions:
2009 for f, args, msg in mergeactions:
2010 if f in unresolvedf:
2010 if f in unresolvedf:
2011 newactions.append((f, args, msg))
2011 newactions.append((f, args, msg))
2012 mergeactions = newactions
2012 mergeactions = newactions
2013
2013
2014 try:
2014 try:
2015 # premerge
2015 # premerge
2016 tocomplete = []
2016 tocomplete = []
2017 for f, args, msg in mergeactions:
2017 for f, args, msg in mergeactions:
2018 repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg))
2018 repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg))
2019 progress.increment(item=f)
2019 progress.increment(item=f)
2020 if f == b'.hgsubstate': # subrepo states need updating
2020 if f == b'.hgsubstate': # subrepo states need updating
2021 subrepoutil.submerge(
2021 subrepoutil.submerge(
2022 repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels
2022 repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels
2023 )
2023 )
2024 continue
2024 continue
2025 wctx[f].audit()
2025 wctx[f].audit()
2026 complete, r = ms.preresolve(f, wctx)
2026 complete, r = ms.preresolve(f, wctx)
2027 if not complete:
2027 if not complete:
2028 numupdates += 1
2028 numupdates += 1
2029 tocomplete.append((f, args, msg))
2029 tocomplete.append((f, args, msg))
2030
2030
2031 # merge
2031 # merge
2032 for f, args, msg in tocomplete:
2032 for f, args, msg in tocomplete:
2033 repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg))
2033 repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg))
2034 progress.increment(item=f, total=numupdates)
2034 progress.increment(item=f, total=numupdates)
2035 ms.resolve(f, wctx)
2035 ms.resolve(f, wctx)
2036
2036
2037 finally:
2037 finally:
2038 ms.commit()
2038 ms.commit()
2039
2039
2040 unresolved = ms.unresolvedcount()
2040 unresolved = ms.unresolvedcount()
2041
2041
2042 if (
2042 if (
2043 usemergedriver
2043 usemergedriver
2044 and not unresolved
2044 and not unresolved
2045 and ms.mdstate() != MERGE_DRIVER_STATE_SUCCESS
2045 and ms.mdstate() != MERGE_DRIVER_STATE_SUCCESS
2046 ):
2046 ):
2047 if not driverconclude(repo, ms, wctx, labels=labels):
2047 if not driverconclude(repo, ms, wctx, labels=labels):
2048 # XXX setting unresolved to at least 1 is a hack to make sure we
2048 # XXX setting unresolved to at least 1 is a hack to make sure we
2049 # error out
2049 # error out
2050 unresolved = max(unresolved, 1)
2050 unresolved = max(unresolved, 1)
2051
2051
2052 ms.commit()
2052 ms.commit()
2053
2053
2054 msupdated, msmerged, msremoved = ms.counts()
2054 msupdated, msmerged, msremoved = ms.counts()
2055 updated += msupdated
2055 updated += msupdated
2056 merged += msmerged
2056 merged += msmerged
2057 removed += msremoved
2057 removed += msremoved
2058
2058
2059 extraactions = ms.actions()
2059 extraactions = ms.actions()
2060 if extraactions:
2060 if extraactions:
2061 mfiles = set(a[0] for a in actions[ACTION_MERGE])
2061 mfiles = set(a[0] for a in actions[ACTION_MERGE])
2062 for k, acts in pycompat.iteritems(extraactions):
2062 for k, acts in pycompat.iteritems(extraactions):
2063 actions[k].extend(acts)
2063 actions[k].extend(acts)
2064 if k == ACTION_GET and wantfiledata:
2064 if k == ACTION_GET and wantfiledata:
2065 # no filedata until mergestate is updated to provide it
2065 # no filedata until mergestate is updated to provide it
2066 for a in acts:
2066 for a in acts:
2067 getfiledata[a[0]] = None
2067 getfiledata[a[0]] = None
2068 # Remove these files from actions[ACTION_MERGE] as well. This is
2068 # Remove these files from actions[ACTION_MERGE] as well. This is
2069 # important because in recordupdates, files in actions[ACTION_MERGE]
2069 # important because in recordupdates, files in actions[ACTION_MERGE]
2070 # are processed after files in other actions, and the merge driver
2070 # are processed after files in other actions, and the merge driver
2071 # might add files to those actions via extraactions above. This can
2071 # might add files to those actions via extraactions above. This can
2072 # lead to a file being recorded twice, with poor results. This is
2072 # lead to a file being recorded twice, with poor results. This is
2073 # especially problematic for actions[ACTION_REMOVE] (currently only
2073 # especially problematic for actions[ACTION_REMOVE] (currently only
2074 # possible with the merge driver in the initial merge process;
2074 # possible with the merge driver in the initial merge process;
2075 # interrupted merges don't go through this flow).
2075 # interrupted merges don't go through this flow).
2076 #
2076 #
2077 # The real fix here is to have indexes by both file and action so
2077 # The real fix here is to have indexes by both file and action so
2078 # that when the action for a file is changed it is automatically
2078 # that when the action for a file is changed it is automatically
2079 # reflected in the other action lists. But that involves a more
2079 # reflected in the other action lists. But that involves a more
2080 # complex data structure, so this will do for now.
2080 # complex data structure, so this will do for now.
2081 #
2081 #
2082 # We don't need to do the same operation for 'dc' and 'cd' because
2082 # We don't need to do the same operation for 'dc' and 'cd' because
2083 # those lists aren't consulted again.
2083 # those lists aren't consulted again.
2084 mfiles.difference_update(a[0] for a in acts)
2084 mfiles.difference_update(a[0] for a in acts)
2085
2085
2086 actions[ACTION_MERGE] = [
2086 actions[ACTION_MERGE] = [
2087 a for a in actions[ACTION_MERGE] if a[0] in mfiles
2087 a for a in actions[ACTION_MERGE] if a[0] in mfiles
2088 ]
2088 ]
2089
2089
2090 progress.complete()
2090 progress.complete()
2091 assert len(getfiledata) == (len(actions[ACTION_GET]) if wantfiledata else 0)
2091 assert len(getfiledata) == (len(actions[ACTION_GET]) if wantfiledata else 0)
2092 return updateresult(updated, merged, removed, unresolved), getfiledata
2092 return updateresult(updated, merged, removed, unresolved), getfiledata
2093
2093
2094
2094
def recordupdates(repo, actions, branchmerge, getfiledata):
    """Record merge actions to the dirstate.

    ``actions`` maps action types (the module-level ACTION_* constants)
    to lists of ``(filename, args, message)`` tuples produced by the
    merge machinery.  ``branchmerge`` is True when recording a merge
    between two parents rather than a plain update; it selects which
    dirstate state each file ends up in.  ``getfiledata``, when truthy,
    maps filenames from the ACTION_GET list to cached stat data used to
    prime the dirstate entry.

    Note: this was previously "documented" with a bytes literal, which
    Python 3 does not treat as a docstring; it is now a real docstring.
    """
    # remove (must come first)
    for f, args, msg in actions.get(ACTION_REMOVE, []):
        if branchmerge:
            repo.dirstate.remove(f)
        else:
            repo.dirstate.drop(f)

    # forget (must come first)
    for f, args, msg in actions.get(ACTION_FORGET, []):
        repo.dirstate.drop(f)

    # resolve path conflicts
    for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
        (f0,) = args
        # preserve the original copy source across the renamed-aside file
        origf0 = repo.dirstate.copied(f0) or f0
        repo.dirstate.add(f)
        repo.dirstate.copy(origf0, f)
        if f0 == origf0:
            repo.dirstate.remove(f0)
        else:
            repo.dirstate.drop(f0)

    # re-add
    for f, args, msg in actions.get(ACTION_ADD, []):
        repo.dirstate.add(f)

    # re-add/mark as modified
    for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []):
        if branchmerge:
            repo.dirstate.normallookup(f)
        else:
            repo.dirstate.add(f)

    # exec change
    for f, args, msg in actions.get(ACTION_EXEC, []):
        repo.dirstate.normallookup(f)

    # keep (no dirstate change needed)
    for f, args, msg in actions.get(ACTION_KEEP, []):
        pass

    # get
    for f, args, msg in actions.get(ACTION_GET, []):
        if branchmerge:
            repo.dirstate.otherparent(f)
        else:
            # prime the dirstate with cached stat data when available
            parentfiledata = getfiledata[f] if getfiledata else None
            repo.dirstate.normal(f, parentfiledata=parentfiledata)

    # merge
    for f, args, msg in actions.get(ACTION_MERGE, []):
        f1, f2, fa, move, anc = args
        if branchmerge:
            # We've done a branch merge, mark this file as merged
            # so that we properly record the merger later
            repo.dirstate.merge(f)
            if f1 != f2:  # copy/rename
                if move:
                    repo.dirstate.remove(f1)
                if f1 != f:
                    repo.dirstate.copy(f1, f)
                else:
                    repo.dirstate.copy(f2, f)
        else:
            # We've update-merged a locally modified file, so
            # we set the dirstate to emulate a normal checkout
            # of that file some time in the past. Thus our
            # merge will appear as a normal local file
            # modification.
            if f2 == f:  # file not locally copied/moved
                repo.dirstate.normallookup(f)
            if move:
                repo.dirstate.drop(f1)

    # directory rename, move local
    for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.remove(f0)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)
            repo.dirstate.drop(f0)

    # directory rename, get
    for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)
2190
2190
2191
2191
# Values accepted for the ``updatecheck`` argument of ``update()``;
# they mirror the ``commands.update.check`` / experimental.updatecheck
# config choices.
UPDATECHECK_ABORT = b'abort'  # handled at higher layers
UPDATECHECK_NONE = b'none'
UPDATECHECK_LINEAR = b'linear'
UPDATECHECK_NO_CONFLICT = b'noconflict'
2196
2196
2197
2197
2198 def update(
2198 def update(
2199 repo,
2199 repo,
2200 node,
2200 node,
2201 branchmerge,
2201 branchmerge,
2202 force,
2202 force,
2203 ancestor=None,
2203 ancestor=None,
2204 mergeancestor=False,
2204 mergeancestor=False,
2205 labels=None,
2205 labels=None,
2206 matcher=None,
2206 matcher=None,
2207 mergeforce=False,
2207 mergeforce=False,
2208 updatecheck=None,
2208 updatecheck=None,
2209 wc=None,
2209 wc=None,
2210 ):
2210 ):
2211 """
2211 """
2212 Perform a merge between the working directory and the given node
2212 Perform a merge between the working directory and the given node
2213
2213
2214 node = the node to update to
2214 node = the node to update to
2215 branchmerge = whether to merge between branches
2215 branchmerge = whether to merge between branches
2216 force = whether to force branch merging or file overwriting
2216 force = whether to force branch merging or file overwriting
2217 matcher = a matcher to filter file lists (dirstate not updated)
2217 matcher = a matcher to filter file lists (dirstate not updated)
2218 mergeancestor = whether it is merging with an ancestor. If true,
2218 mergeancestor = whether it is merging with an ancestor. If true,
2219 we should accept the incoming changes for any prompts that occur.
2219 we should accept the incoming changes for any prompts that occur.
2220 If false, merging with an ancestor (fast-forward) is only allowed
2220 If false, merging with an ancestor (fast-forward) is only allowed
2221 between different named branches. This flag is used by rebase extension
2221 between different named branches. This flag is used by rebase extension
2222 as a temporary fix and should be avoided in general.
2222 as a temporary fix and should be avoided in general.
2223 labels = labels to use for base, local and other
2223 labels = labels to use for base, local and other
2224 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
2224 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
2225 this is True, then 'force' should be True as well.
2225 this is True, then 'force' should be True as well.
2226
2226
2227 The table below shows all the behaviors of the update command given the
2227 The table below shows all the behaviors of the update command given the
2228 -c/--check and -C/--clean or no options, whether the working directory is
2228 -c/--check and -C/--clean or no options, whether the working directory is
2229 dirty, whether a revision is specified, and the relationship of the parent
2229 dirty, whether a revision is specified, and the relationship of the parent
2230 rev to the target rev (linear or not). Match from top first. The -n
2230 rev to the target rev (linear or not). Match from top first. The -n
2231 option doesn't exist on the command line, but represents the
2231 option doesn't exist on the command line, but represents the
2232 experimental.updatecheck=noconflict option.
2232 experimental.updatecheck=noconflict option.
2233
2233
2234 This logic is tested by test-update-branches.t.
2234 This logic is tested by test-update-branches.t.
2235
2235
2236 -c -C -n -m dirty rev linear | result
2236 -c -C -n -m dirty rev linear | result
2237 y y * * * * * | (1)
2237 y y * * * * * | (1)
2238 y * y * * * * | (1)
2238 y * y * * * * | (1)
2239 y * * y * * * | (1)
2239 y * * y * * * | (1)
2240 * y y * * * * | (1)
2240 * y y * * * * | (1)
2241 * y * y * * * | (1)
2241 * y * y * * * | (1)
2242 * * y y * * * | (1)
2242 * * y y * * * | (1)
2243 * * * * * n n | x
2243 * * * * * n n | x
2244 * * * * n * * | ok
2244 * * * * n * * | ok
2245 n n n n y * y | merge
2245 n n n n y * y | merge
2246 n n n n y y n | (2)
2246 n n n n y y n | (2)
2247 n n n y y * * | merge
2247 n n n y y * * | merge
2248 n n y n y * * | merge if no conflict
2248 n n y n y * * | merge if no conflict
2249 n y n n y * * | discard
2249 n y n n y * * | discard
2250 y n n n y * * | (3)
2250 y n n n y * * | (3)
2251
2251
2252 x = can't happen
2252 x = can't happen
2253 * = don't-care
2253 * = don't-care
2254 1 = incompatible options (checked in commands.py)
2254 1 = incompatible options (checked in commands.py)
2255 2 = abort: uncommitted changes (commit or update --clean to discard changes)
2255 2 = abort: uncommitted changes (commit or update --clean to discard changes)
2256 3 = abort: uncommitted changes (checked in commands.py)
2256 3 = abort: uncommitted changes (checked in commands.py)
2257
2257
2258 The merge is performed inside ``wc``, a workingctx-like objects. It defaults
2258 The merge is performed inside ``wc``, a workingctx-like objects. It defaults
2259 to repo[None] if None is passed.
2259 to repo[None] if None is passed.
2260
2260
2261 Return the same tuple as applyupdates().
2261 Return the same tuple as applyupdates().
2262 """
2262 """
2263 # Avoid cycle.
2263 # Avoid cycle.
2264 from . import sparse
2264 from . import sparse
2265
2265
2266 # This function used to find the default destination if node was None, but
2266 # This function used to find the default destination if node was None, but
2267 # that's now in destutil.py.
2267 # that's now in destutil.py.
2268 assert node is not None
2268 assert node is not None
2269 if not branchmerge and not force:
2269 if not branchmerge and not force:
2270 # TODO: remove the default once all callers that pass branchmerge=False
2270 # TODO: remove the default once all callers that pass branchmerge=False
2271 # and force=False pass a value for updatecheck. We may want to allow
2271 # and force=False pass a value for updatecheck. We may want to allow
2272 # updatecheck='abort' to better suppport some of these callers.
2272 # updatecheck='abort' to better suppport some of these callers.
2273 if updatecheck is None:
2273 if updatecheck is None:
2274 updatecheck = UPDATECHECK_LINEAR
2274 updatecheck = UPDATECHECK_LINEAR
2275 if updatecheck not in (
2275 if updatecheck not in (
2276 UPDATECHECK_NONE,
2276 UPDATECHECK_NONE,
2277 UPDATECHECK_LINEAR,
2277 UPDATECHECK_LINEAR,
2278 UPDATECHECK_NO_CONFLICT,
2278 UPDATECHECK_NO_CONFLICT,
2279 ):
2279 ):
2280 raise ValueError(
2280 raise ValueError(
2281 r'Invalid updatecheck %r (can accept %r)'
2281 r'Invalid updatecheck %r (can accept %r)'
2282 % (
2282 % (
2283 updatecheck,
2283 updatecheck,
2284 (
2284 (
2285 UPDATECHECK_NONE,
2285 UPDATECHECK_NONE,
2286 UPDATECHECK_LINEAR,
2286 UPDATECHECK_LINEAR,
2287 UPDATECHECK_NO_CONFLICT,
2287 UPDATECHECK_NO_CONFLICT,
2288 ),
2288 ),
2289 )
2289 )
2290 )
2290 )
2291 # If we're doing a partial update, we need to skip updating
2291 # If we're doing a partial update, we need to skip updating
2292 # the dirstate, so make a note of any partial-ness to the
2292 # the dirstate, so make a note of any partial-ness to the
2293 # update here.
2293 # update here.
2294 if matcher is None or matcher.always():
2294 if matcher is None or matcher.always():
2295 partial = False
2295 partial = False
2296 else:
2296 else:
2297 partial = True
2297 partial = True
2298 with repo.wlock():
2298 with repo.wlock():
2299 if wc is None:
2299 if wc is None:
2300 wc = repo[None]
2300 wc = repo[None]
2301 pl = wc.parents()
2301 pl = wc.parents()
2302 p1 = pl[0]
2302 p1 = pl[0]
2303 p2 = repo[node]
2303 p2 = repo[node]
2304 if ancestor is not None:
2304 if ancestor is not None:
2305 pas = [repo[ancestor]]
2305 pas = [repo[ancestor]]
2306 else:
2306 else:
2307 if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
2307 if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
2308 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
2308 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
2309 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
2309 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
2310 else:
2310 else:
2311 pas = [p1.ancestor(p2, warn=branchmerge)]
2311 pas = [p1.ancestor(p2, warn=branchmerge)]
2312
2312
2313 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
2313 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
2314
2314
2315 overwrite = force and not branchmerge
2315 overwrite = force and not branchmerge
2316 ### check phase
2316 ### check phase
2317 if not overwrite:
2317 if not overwrite:
2318 if len(pl) > 1:
2318 if len(pl) > 1:
2319 raise error.Abort(_(b"outstanding uncommitted merge"))
2319 raise error.Abort(_(b"outstanding uncommitted merge"))
2320 ms = mergestate.read(repo)
2320 ms = mergestate.read(repo)
2321 if list(ms.unresolved()):
2321 if list(ms.unresolved()):
2322 raise error.Abort(
2322 raise error.Abort(
2323 _(b"outstanding merge conflicts"),
2323 _(b"outstanding merge conflicts"),
2324 hint=_(b"use 'hg resolve' to resolve"),
2324 hint=_(b"use 'hg resolve' to resolve"),
2325 )
2325 )
2326 if branchmerge:
2326 if branchmerge:
2327 if pas == [p2]:
2327 if pas == [p2]:
2328 raise error.Abort(
2328 raise error.Abort(
2329 _(
2329 _(
2330 b"merging with a working directory ancestor"
2330 b"merging with a working directory ancestor"
2331 b" has no effect"
2331 b" has no effect"
2332 )
2332 )
2333 )
2333 )
2334 elif pas == [p1]:
2334 elif pas == [p1]:
2335 if not mergeancestor and wc.branch() == p2.branch():
2335 if not mergeancestor and wc.branch() == p2.branch():
2336 raise error.Abort(
2336 raise error.Abort(
2337 _(b"nothing to merge"),
2337 _(b"nothing to merge"),
2338 hint=_(b"use 'hg update' or check 'hg heads'"),
2338 hint=_(b"use 'hg update' or check 'hg heads'"),
2339 )
2339 )
2340 if not force and (wc.files() or wc.deleted()):
2340 if not force and (wc.files() or wc.deleted()):
2341 raise error.Abort(
2341 raise error.Abort(
2342 _(b"uncommitted changes"),
2342 _(b"uncommitted changes"),
2343 hint=_(b"use 'hg status' to list changes"),
2343 hint=_(b"use 'hg status' to list changes"),
2344 )
2344 )
2345 if not wc.isinmemory():
2345 if not wc.isinmemory():
2346 for s in sorted(wc.substate):
2346 for s in sorted(wc.substate):
2347 wc.sub(s).bailifchanged()
2347 wc.sub(s).bailifchanged()
2348
2348
2349 elif not overwrite:
2349 elif not overwrite:
2350 if p1 == p2: # no-op update
2350 if p1 == p2: # no-op update
2351 # call the hooks and exit early
2351 # call the hooks and exit early
2352 repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'')
2352 repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'')
2353 repo.hook(b'update', parent1=xp2, parent2=b'', error=0)
2353 repo.hook(b'update', parent1=xp2, parent2=b'', error=0)
2354 return updateresult(0, 0, 0, 0)
2354 return updateresult(0, 0, 0, 0)
2355
2355
2356 if updatecheck == UPDATECHECK_LINEAR and pas not in (
2356 if updatecheck == UPDATECHECK_LINEAR and pas not in (
2357 [p1],
2357 [p1],
2358 [p2],
2358 [p2],
2359 ): # nonlinear
2359 ): # nonlinear
2360 dirty = wc.dirty(missing=True)
2360 dirty = wc.dirty(missing=True)
2361 if dirty:
2361 if dirty:
2362 # Branching is a bit strange to ensure we do the minimal
2362 # Branching is a bit strange to ensure we do the minimal
2363 # amount of call to obsutil.foreground.
2363 # amount of call to obsutil.foreground.
2364 foreground = obsutil.foreground(repo, [p1.node()])
2364 foreground = obsutil.foreground(repo, [p1.node()])
2365 # note: the <node> variable contains a random identifier
2365 # note: the <node> variable contains a random identifier
2366 if repo[node].node() in foreground:
2366 if repo[node].node() in foreground:
2367 pass # allow updating to successors
2367 pass # allow updating to successors
2368 else:
2368 else:
2369 msg = _(b"uncommitted changes")
2369 msg = _(b"uncommitted changes")
2370 hint = _(b"commit or update --clean to discard changes")
2370 hint = _(b"commit or update --clean to discard changes")
2371 raise error.UpdateAbort(msg, hint=hint)
2371 raise error.UpdateAbort(msg, hint=hint)
2372 else:
2372 else:
2373 # Allow jumping branches if clean and specific rev given
2373 # Allow jumping branches if clean and specific rev given
2374 pass
2374 pass
2375
2375
2376 if overwrite:
2376 if overwrite:
2377 pas = [wc]
2377 pas = [wc]
2378 elif not branchmerge:
2378 elif not branchmerge:
2379 pas = [p1]
2379 pas = [p1]
2380
2380
2381 # deprecated config: merge.followcopies
2381 # deprecated config: merge.followcopies
2382 followcopies = repo.ui.configbool(b'merge', b'followcopies')
2382 followcopies = repo.ui.configbool(b'merge', b'followcopies')
2383 if overwrite:
2383 if overwrite:
2384 followcopies = False
2384 followcopies = False
2385 elif not pas[0]:
2385 elif not pas[0]:
2386 followcopies = False
2386 followcopies = False
2387 if not branchmerge and not wc.dirty(missing=True):
2387 if not branchmerge and not wc.dirty(missing=True):
2388 followcopies = False
2388 followcopies = False
2389
2389
2390 ### calculate phase
2390 ### calculate phase
2391 actionbyfile, diverge, renamedelete = calculateupdates(
2391 actionbyfile, diverge, renamedelete = calculateupdates(
2392 repo,
2392 repo,
2393 wc,
2393 wc,
2394 p2,
2394 p2,
2395 pas,
2395 pas,
2396 branchmerge,
2396 branchmerge,
2397 force,
2397 force,
2398 mergeancestor,
2398 mergeancestor,
2399 followcopies,
2399 followcopies,
2400 matcher=matcher,
2400 matcher=matcher,
2401 mergeforce=mergeforce,
2401 mergeforce=mergeforce,
2402 )
2402 )
2403
2403
2404 if updatecheck == UPDATECHECK_NO_CONFLICT:
2404 if updatecheck == UPDATECHECK_NO_CONFLICT:
2405 for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
2405 for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
2406 if m not in (
2406 if m not in (
2407 ACTION_GET,
2407 ACTION_GET,
2408 ACTION_KEEP,
2408 ACTION_KEEP,
2409 ACTION_EXEC,
2409 ACTION_EXEC,
2410 ACTION_REMOVE,
2410 ACTION_REMOVE,
2411 ACTION_PATH_CONFLICT_RESOLVE,
2411 ACTION_PATH_CONFLICT_RESOLVE,
2412 ):
2412 ):
2413 msg = _(b"conflicting changes")
2413 msg = _(b"conflicting changes")
2414 hint = _(b"commit or update --clean to discard changes")
2414 hint = _(b"commit or update --clean to discard changes")
2415 raise error.Abort(msg, hint=hint)
2415 raise error.Abort(msg, hint=hint)
2416
2416
2417 # Prompt and create actions. Most of this is in the resolve phase
2417 # Prompt and create actions. Most of this is in the resolve phase
2418 # already, but we can't handle .hgsubstate in filemerge or
2418 # already, but we can't handle .hgsubstate in filemerge or
2419 # subrepoutil.submerge yet so we have to keep prompting for it.
2419 # subrepoutil.submerge yet so we have to keep prompting for it.
2420 if b'.hgsubstate' in actionbyfile:
2420 if b'.hgsubstate' in actionbyfile:
2421 f = b'.hgsubstate'
2421 f = b'.hgsubstate'
2422 m, args, msg = actionbyfile[f]
2422 m, args, msg = actionbyfile[f]
2423 prompts = filemerge.partextras(labels)
2423 prompts = filemerge.partextras(labels)
2424 prompts[b'f'] = f
2424 prompts[b'f'] = f
2425 if m == ACTION_CHANGED_DELETED:
2425 if m == ACTION_CHANGED_DELETED:
2426 if repo.ui.promptchoice(
2426 if repo.ui.promptchoice(
2427 _(
2427 _(
2428 b"local%(l)s changed %(f)s which other%(o)s deleted\n"
2428 b"local%(l)s changed %(f)s which other%(o)s deleted\n"
2429 b"use (c)hanged version or (d)elete?"
2429 b"use (c)hanged version or (d)elete?"
2430 b"$$ &Changed $$ &Delete"
2430 b"$$ &Changed $$ &Delete"
2431 )
2431 )
2432 % prompts,
2432 % prompts,
2433 0,
2433 0,
2434 ):
2434 ):
2435 actionbyfile[f] = (ACTION_REMOVE, None, b'prompt delete')
2435 actionbyfile[f] = (ACTION_REMOVE, None, b'prompt delete')
2436 elif f in p1:
2436 elif f in p1:
2437 actionbyfile[f] = (
2437 actionbyfile[f] = (
2438 ACTION_ADD_MODIFIED,
2438 ACTION_ADD_MODIFIED,
2439 None,
2439 None,
2440 b'prompt keep',
2440 b'prompt keep',
2441 )
2441 )
2442 else:
2442 else:
2443 actionbyfile[f] = (ACTION_ADD, None, b'prompt keep')
2443 actionbyfile[f] = (ACTION_ADD, None, b'prompt keep')
2444 elif m == ACTION_DELETED_CHANGED:
2444 elif m == ACTION_DELETED_CHANGED:
2445 f1, f2, fa, move, anc = args
2445 f1, f2, fa, move, anc = args
2446 flags = p2[f2].flags()
2446 flags = p2[f2].flags()
2447 if (
2447 if (
2448 repo.ui.promptchoice(
2448 repo.ui.promptchoice(
2449 _(
2449 _(
2450 b"other%(o)s changed %(f)s which local%(l)s deleted\n"
2450 b"other%(o)s changed %(f)s which local%(l)s deleted\n"
2451 b"use (c)hanged version or leave (d)eleted?"
2451 b"use (c)hanged version or leave (d)eleted?"
2452 b"$$ &Changed $$ &Deleted"
2452 b"$$ &Changed $$ &Deleted"
2453 )
2453 )
2454 % prompts,
2454 % prompts,
2455 0,
2455 0,
2456 )
2456 )
2457 == 0
2457 == 0
2458 ):
2458 ):
2459 actionbyfile[f] = (
2459 actionbyfile[f] = (
2460 ACTION_GET,
2460 ACTION_GET,
2461 (flags, False),
2461 (flags, False),
2462 b'prompt recreating',
2462 b'prompt recreating',
2463 )
2463 )
2464 else:
2464 else:
2465 del actionbyfile[f]
2465 del actionbyfile[f]
2466
2466
2467 # Convert to dictionary-of-lists format
2467 # Convert to dictionary-of-lists format
2468 actions = emptyactions()
2468 actions = emptyactions()
2469 for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
2469 for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
2470 if m not in actions:
2470 if m not in actions:
2471 actions[m] = []
2471 actions[m] = []
2472 actions[m].append((f, args, msg))
2472 actions[m].append((f, args, msg))
2473
2473
2474 if not util.fscasesensitive(repo.path):
2474 if not util.fscasesensitive(repo.path):
2475 # check collision between files only in p2 for clean update
2475 # check collision between files only in p2 for clean update
2476 if not branchmerge and (
2476 if not branchmerge and (
2477 force or not wc.dirty(missing=True, branch=False)
2477 force or not wc.dirty(missing=True, branch=False)
2478 ):
2478 ):
2479 _checkcollision(repo, p2.manifest(), None)
2479 _checkcollision(repo, p2.manifest(), None)
2480 else:
2480 else:
2481 _checkcollision(repo, wc.manifest(), actions)
2481 _checkcollision(repo, wc.manifest(), actions)
2482
2482
2483 # divergent renames
2483 # divergent renames
2484 for f, fl in sorted(pycompat.iteritems(diverge)):
2484 for f, fl in sorted(pycompat.iteritems(diverge)):
2485 repo.ui.warn(
2485 repo.ui.warn(
2486 _(
2486 _(
2487 b"note: possible conflict - %s was renamed "
2487 b"note: possible conflict - %s was renamed "
2488 b"multiple times to:\n"
2488 b"multiple times to:\n"
2489 )
2489 )
2490 % f
2490 % f
2491 )
2491 )
2492 for nf in sorted(fl):
2492 for nf in sorted(fl):
2493 repo.ui.warn(b" %s\n" % nf)
2493 repo.ui.warn(b" %s\n" % nf)
2494
2494
2495 # rename and delete
2495 # rename and delete
2496 for f, fl in sorted(pycompat.iteritems(renamedelete)):
2496 for f, fl in sorted(pycompat.iteritems(renamedelete)):
2497 repo.ui.warn(
2497 repo.ui.warn(
2498 _(
2498 _(
2499 b"note: possible conflict - %s was deleted "
2499 b"note: possible conflict - %s was deleted "
2500 b"and renamed to:\n"
2500 b"and renamed to:\n"
2501 )
2501 )
2502 % f
2502 % f
2503 )
2503 )
2504 for nf in sorted(fl):
2504 for nf in sorted(fl):
2505 repo.ui.warn(b" %s\n" % nf)
2505 repo.ui.warn(b" %s\n" % nf)
2506
2506
2507 ### apply phase
2507 ### apply phase
2508 if not branchmerge: # just jump to the new rev
2508 if not branchmerge: # just jump to the new rev
2509 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
2509 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
2510 if not partial and not wc.isinmemory():
2510 if not partial and not wc.isinmemory():
2511 repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
2511 repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
2512 # note that we're in the middle of an update
2512 # note that we're in the middle of an update
2513 repo.vfs.write(b'updatestate', p2.hex())
2513 repo.vfs.write(b'updatestate', p2.hex())
2514
2514
2515 # Advertise fsmonitor when its presence could be useful.
2515 # Advertise fsmonitor when its presence could be useful.
2516 #
2516 #
2517 # We only advertise when performing an update from an empty working
2517 # We only advertise when performing an update from an empty working
2518 # directory. This typically only occurs during initial clone.
2518 # directory. This typically only occurs during initial clone.
2519 #
2519 #
2520 # We give users a mechanism to disable the warning in case it is
2520 # We give users a mechanism to disable the warning in case it is
2521 # annoying.
2521 # annoying.
2522 #
2522 #
2523 # We only allow on Linux and MacOS because that's where fsmonitor is
2523 # We only allow on Linux and MacOS because that's where fsmonitor is
2524 # considered stable.
2524 # considered stable.
2525 fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
2525 fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
2526 fsmonitorthreshold = repo.ui.configint(
2526 fsmonitorthreshold = repo.ui.configint(
2527 b'fsmonitor', b'warn_update_file_count'
2527 b'fsmonitor', b'warn_update_file_count'
2528 )
2528 )
2529 try:
2529 try:
2530 # avoid cycle: extensions -> cmdutil -> merge
2530 # avoid cycle: extensions -> cmdutil -> merge
2531 from . import extensions
2531 from . import extensions
2532
2532
2533 extensions.find(b'fsmonitor')
2533 extensions.find(b'fsmonitor')
2534 fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
2534 fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
2535 # We intentionally don't look at whether fsmonitor has disabled
2535 # We intentionally don't look at whether fsmonitor has disabled
2536 # itself because a) fsmonitor may have already printed a warning
2536 # itself because a) fsmonitor may have already printed a warning
2537 # b) we only care about the config state here.
2537 # b) we only care about the config state here.
2538 except KeyError:
2538 except KeyError:
2539 fsmonitorenabled = False
2539 fsmonitorenabled = False
2540
2540
2541 if (
2541 if (
2542 fsmonitorwarning
2542 fsmonitorwarning
2543 and not fsmonitorenabled
2543 and not fsmonitorenabled
2544 and p1.node() == nullid
2544 and p1.node() == nullid
2545 and len(actions[ACTION_GET]) >= fsmonitorthreshold
2545 and len(actions[ACTION_GET]) >= fsmonitorthreshold
2546 and pycompat.sysplatform.startswith((b'linux', b'darwin'))
2546 and pycompat.sysplatform.startswith((b'linux', b'darwin'))
2547 ):
2547 ):
2548 repo.ui.warn(
2548 repo.ui.warn(
2549 _(
2549 _(
2550 b'(warning: large working directory being used without '
2550 b'(warning: large working directory being used without '
2551 b'fsmonitor enabled; enable fsmonitor to improve performance; '
2551 b'fsmonitor enabled; enable fsmonitor to improve performance; '
2552 b'see "hg help -e fsmonitor")\n'
2552 b'see "hg help -e fsmonitor")\n'
2553 )
2553 )
2554 )
2554 )
2555
2555
2556 updatedirstate = not partial and not wc.isinmemory()
2556 updatedirstate = not partial and not wc.isinmemory()
2557 wantfiledata = updatedirstate and not branchmerge
2557 wantfiledata = updatedirstate and not branchmerge
2558 stats, getfiledata = applyupdates(
2558 stats, getfiledata = applyupdates(
2559 repo, actions, wc, p2, overwrite, wantfiledata, labels=labels
2559 repo, actions, wc, p2, overwrite, wantfiledata, labels=labels
2560 )
2560 )
2561
2561
2562 if updatedirstate:
2562 if updatedirstate:
2563 with repo.dirstate.parentchange():
2563 with repo.dirstate.parentchange():
2564 repo.setparents(fp1, fp2)
2564 repo.setparents(fp1, fp2)
2565 recordupdates(repo, actions, branchmerge, getfiledata)
2565 recordupdates(repo, actions, branchmerge, getfiledata)
2566 # update completed, clear state
2566 # update completed, clear state
2567 util.unlink(repo.vfs.join(b'updatestate'))
2567 util.unlink(repo.vfs.join(b'updatestate'))
2568
2568
2569 if not branchmerge:
2569 if not branchmerge:
2570 repo.dirstate.setbranch(p2.branch())
2570 repo.dirstate.setbranch(p2.branch())
2571
2571
2572 # If we're updating to a location, clean up any stale temporary includes
2572 # If we're updating to a location, clean up any stale temporary includes
2573 # (ex: this happens during hg rebase --abort).
2573 # (ex: this happens during hg rebase --abort).
2574 if not branchmerge:
2574 if not branchmerge:
2575 sparse.prunetemporaryincludes(repo)
2575 sparse.prunetemporaryincludes(repo)
2576
2576
2577 if not partial:
2577 if not partial:
2578 repo.hook(
2578 repo.hook(
2579 b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
2579 b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
2580 )
2580 )
2581 return stats
2581 return stats
2582
2582
2583
2583
2584 def graft(
2584 def graft(
2585 repo, ctx, pctx, labels=None, keepparent=False, keepconflictparent=False
2585 repo, ctx, pctx, labels=None, keepparent=False, keepconflictparent=False
2586 ):
2586 ):
2587 """Do a graft-like merge.
2587 """Do a graft-like merge.
2588
2588
2589 This is a merge where the merge ancestor is chosen such that one
2589 This is a merge where the merge ancestor is chosen such that one
2590 or more changesets are grafted onto the current changeset. In
2590 or more changesets are grafted onto the current changeset. In
2591 addition to the merge, this fixes up the dirstate to include only
2591 addition to the merge, this fixes up the dirstate to include only
2592 a single parent (if keepparent is False) and tries to duplicate any
2592 a single parent (if keepparent is False) and tries to duplicate any
2593 renames/copies appropriately.
2593 renames/copies appropriately.
2594
2594
2595 ctx - changeset to rebase
2595 ctx - changeset to rebase
2596 pctx - merge base, usually ctx.p1()
2596 pctx - merge base, usually ctx.p1()
2597 labels - merge labels eg ['local', 'graft']
2597 labels - merge labels eg ['local', 'graft']
2598 keepparent - keep second parent if any
2598 keepparent - keep second parent if any
2599 keepconflictparent - if unresolved, keep parent used for the merge
2599 keepconflictparent - if unresolved, keep parent used for the merge
2600
2600
2601 """
2601 """
2602 # If we're grafting a descendant onto an ancestor, be sure to pass
2602 # If we're grafting a descendant onto an ancestor, be sure to pass
2603 # mergeancestor=True to update. This does two things: 1) allows the merge if
2603 # mergeancestor=True to update. This does two things: 1) allows the merge if
2604 # the destination is the same as the parent of the ctx (so we can use graft
2604 # the destination is the same as the parent of the ctx (so we can use graft
2605 # to copy commits), and 2) informs update that the incoming changes are
2605 # to copy commits), and 2) informs update that the incoming changes are
2606 # newer than the destination so it doesn't prompt about "remote changed foo
2606 # newer than the destination so it doesn't prompt about "remote changed foo
2607 # which local deleted".
2607 # which local deleted".
2608 mergeancestor = repo.changelog.isancestor(repo[b'.'].node(), ctx.node())
2608 mergeancestor = repo.changelog.isancestor(repo[b'.'].node(), ctx.node())
2609
2609
2610 stats = update(
2610 stats = update(
2611 repo,
2611 repo,
2612 ctx.node(),
2612 ctx.node(),
2613 True,
2613 True,
2614 True,
2614 True,
2615 pctx.node(),
2615 pctx.node(),
2616 mergeancestor=mergeancestor,
2616 mergeancestor=mergeancestor,
2617 labels=labels,
2617 labels=labels,
2618 )
2618 )
2619
2619
2620 if keepconflictparent and stats.unresolvedcount:
2620 if keepconflictparent and stats.unresolvedcount:
2621 pother = ctx.node()
2621 pother = ctx.node()
2622 else:
2622 else:
2623 pother = nullid
2623 pother = nullid
2624 parents = ctx.parents()
2624 parents = ctx.parents()
2625 if keepparent and len(parents) == 2 and pctx in parents:
2625 if keepparent and len(parents) == 2 and pctx in parents:
2626 parents.remove(pctx)
2626 parents.remove(pctx)
2627 pother = parents[0].node()
2627 pother = parents[0].node()
2628
2628
2629 with repo.dirstate.parentchange():
2629 with repo.dirstate.parentchange():
2630 repo.setparents(repo[b'.'].node(), pother)
2630 repo.setparents(repo[b'.'].node(), pother)
2631 repo.dirstate.write(repo.currenttransaction())
2631 repo.dirstate.write(repo.currenttransaction())
2632 # fix up dirstate for copies and renames
2632 # fix up dirstate for copies and renames
2633 copies.duplicatecopies(repo, repo[None], ctx.rev(), pctx.rev())
2633 copies.duplicatecopies(repo, repo[None], ctx.rev(), pctx.rev())
2634 return stats
2634 return stats
2635
2635
2636
2636
2637 def purge(
2637 def purge(
2638 repo,
2638 repo,
2639 matcher,
2639 matcher,
2640 ignored=False,
2640 ignored=False,
2641 removeemptydirs=True,
2641 removeemptydirs=True,
2642 removefiles=True,
2642 removefiles=True,
2643 abortonerror=False,
2643 abortonerror=False,
2644 noop=False,
2644 noop=False,
2645 ):
2645 ):
2646 """Purge the working directory of untracked files.
2646 """Purge the working directory of untracked files.
2647
2647
2648 ``matcher`` is a matcher configured to scan the working directory -
2648 ``matcher`` is a matcher configured to scan the working directory -
2649 potentially a subset.
2649 potentially a subset.
2650
2650
2651 ``ignored`` controls whether ignored files should also be purged.
2651 ``ignored`` controls whether ignored files should also be purged.
2652
2652
2653 ``removeemptydirs`` controls whether empty directories should be removed.
2653 ``removeemptydirs`` controls whether empty directories should be removed.
2654
2654
2655 ``removefiles`` controls whether files are removed.
2655 ``removefiles`` controls whether files are removed.
2656
2656
2657 ``abortonerror`` causes an exception to be raised if an error occurs
2657 ``abortonerror`` causes an exception to be raised if an error occurs
2658 deleting a file or directory.
2658 deleting a file or directory.
2659
2659
2660 ``noop`` controls whether to actually remove files. If not defined, actions
2660 ``noop`` controls whether to actually remove files. If not defined, actions
2661 will be taken.
2661 will be taken.
2662
2662
2663 Returns an iterable of relative paths in the working directory that were
2663 Returns an iterable of relative paths in the working directory that were
2664 or would be removed.
2664 or would be removed.
2665 """
2665 """
2666
2666
2667 def remove(removefn, path):
2667 def remove(removefn, path):
2668 try:
2668 try:
2669 removefn(path)
2669 removefn(path)
2670 except OSError:
2670 except OSError:
2671 m = _(b'%s cannot be removed') % path
2671 m = _(b'%s cannot be removed') % path
2672 if abortonerror:
2672 if abortonerror:
2673 raise error.Abort(m)
2673 raise error.Abort(m)
2674 else:
2674 else:
2675 repo.ui.warn(_(b'warning: %s\n') % m)
2675 repo.ui.warn(_(b'warning: %s\n') % m)
2676
2676
2677 # There's no API to copy a matcher. So mutate the passed matcher and
2677 # There's no API to copy a matcher. So mutate the passed matcher and
2678 # restore it when we're done.
2678 # restore it when we're done.
2679 oldexplicitdir = matcher.explicitdir
2680 oldtraversedir = matcher.traversedir
2679 oldtraversedir = matcher.traversedir
2681
2680
2682 res = []
2681 res = []
2683
2682
2684 try:
2683 try:
2685 if removeemptydirs:
2684 if removeemptydirs:
2686 directories = []
2685 directories = []
2687 matcher.explicitdir = matcher.traversedir = directories.append
2686 matcher.traversedir = directories.append
2688
2687
2689 status = repo.status(match=matcher, ignored=ignored, unknown=True)
2688 status = repo.status(match=matcher, ignored=ignored, unknown=True)
2690
2689
2691 if removefiles:
2690 if removefiles:
2692 for f in sorted(status.unknown + status.ignored):
2691 for f in sorted(status.unknown + status.ignored):
2693 if not noop:
2692 if not noop:
2694 repo.ui.note(_(b'removing file %s\n') % f)
2693 repo.ui.note(_(b'removing file %s\n') % f)
2695 remove(repo.wvfs.unlink, f)
2694 remove(repo.wvfs.unlink, f)
2696 res.append(f)
2695 res.append(f)
2697
2696
2698 if removeemptydirs:
2697 if removeemptydirs:
2699 for f in sorted(directories, reverse=True):
2698 for f in sorted(directories, reverse=True):
2700 if matcher(f) and not repo.wvfs.listdir(f):
2699 if matcher(f) and not repo.wvfs.listdir(f):
2701 if not noop:
2700 if not noop:
2702 repo.ui.note(_(b'removing directory %s\n') % f)
2701 repo.ui.note(_(b'removing directory %s\n') % f)
2703 remove(repo.wvfs.rmdir, f)
2702 remove(repo.wvfs.rmdir, f)
2704 res.append(f)
2703 res.append(f)
2705
2704
2706 return res
2705 return res
2707
2706
2708 finally:
2707 finally:
2709 matcher.explicitdir = oldexplicitdir
2710 matcher.traversedir = oldtraversedir
2708 matcher.traversedir = oldtraversedir
General Comments 0
You need to be logged in to leave comments. Login now