##// END OF EJS Templates
copies: define a type to return from mergecopies()...
Martin von Zweigbergk -
r44681:7f8bdee0 default
parent child Browse files
Show More
@@ -1,1151 +1,1165 b''
1 # copies.py - copy detection for Mercurial
1 # copies.py - copy detection for Mercurial
2 #
2 #
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import multiprocessing
11 import multiprocessing
12 import os
12 import os
13
13
14 from .i18n import _
14 from .i18n import _
15
15
16
16
17 from .revlogutils.flagutil import REVIDX_SIDEDATA
17 from .revlogutils.flagutil import REVIDX_SIDEDATA
18
18
19 from . import (
19 from . import (
20 error,
20 error,
21 match as matchmod,
21 match as matchmod,
22 node,
22 node,
23 pathutil,
23 pathutil,
24 pycompat,
24 pycompat,
25 util,
25 util,
26 )
26 )
27
27
28 from .revlogutils import sidedata as sidedatamod
28 from .revlogutils import sidedata as sidedatamod
29
29
30 from .utils import stringutil
30 from .utils import stringutil
31
31
32
32
33 def _filter(src, dst, t):
33 def _filter(src, dst, t):
34 """filters out invalid copies after chaining"""
34 """filters out invalid copies after chaining"""
35
35
36 # When _chain()'ing copies in 'a' (from 'src' via some other commit 'mid')
36 # When _chain()'ing copies in 'a' (from 'src' via some other commit 'mid')
37 # with copies in 'b' (from 'mid' to 'dst'), we can get the different cases
37 # with copies in 'b' (from 'mid' to 'dst'), we can get the different cases
38 # in the following table (not including trivial cases). For example, case 2
38 # in the following table (not including trivial cases). For example, case 2
39 # is where a file existed in 'src' and remained under that name in 'mid' and
39 # is where a file existed in 'src' and remained under that name in 'mid' and
40 # then was renamed between 'mid' and 'dst'.
40 # then was renamed between 'mid' and 'dst'.
41 #
41 #
42 # case src mid dst result
42 # case src mid dst result
43 # 1 x y - -
43 # 1 x y - -
44 # 2 x y y x->y
44 # 2 x y y x->y
45 # 3 x y x -
45 # 3 x y x -
46 # 4 x y z x->z
46 # 4 x y z x->z
47 # 5 - x y -
47 # 5 - x y -
48 # 6 x x y x->y
48 # 6 x x y x->y
49 #
49 #
50 # _chain() takes care of chaining the copies in 'a' and 'b', but it
50 # _chain() takes care of chaining the copies in 'a' and 'b', but it
51 # cannot tell the difference between cases 1 and 2, between 3 and 4, or
51 # cannot tell the difference between cases 1 and 2, between 3 and 4, or
52 # between 5 and 6, so it includes all cases in its result.
52 # between 5 and 6, so it includes all cases in its result.
53 # Cases 1, 3, and 5 are then removed by _filter().
53 # Cases 1, 3, and 5 are then removed by _filter().
54
54
55 for k, v in list(t.items()):
55 for k, v in list(t.items()):
56 # remove copies from files that didn't exist
56 # remove copies from files that didn't exist
57 if v not in src:
57 if v not in src:
58 del t[k]
58 del t[k]
59 # remove criss-crossed copies
59 # remove criss-crossed copies
60 elif k in src and v in dst:
60 elif k in src and v in dst:
61 del t[k]
61 del t[k]
62 # remove copies to files that were then removed
62 # remove copies to files that were then removed
63 elif k not in dst:
63 elif k not in dst:
64 del t[k]
64 del t[k]
65
65
66
66
def _chain(prefix, suffix):
    """compose the two copy mappings 'prefix' (applied first) and 'suffix'

    A destination recorded in 'suffix' is traced back through 'prefix' when
    its source was itself created by a copy there.
    """
    chained = prefix.copy()
    for dst, src in pycompat.iteritems(suffix):
        # if src was itself copied from somewhere in 'prefix', point the
        # final destination at that original source instead
        chained[dst] = prefix.get(src, src)
    return chained
73
73
74
74
75 def _tracefile(fctx, am, basemf):
75 def _tracefile(fctx, am, basemf):
76 """return file context that is the ancestor of fctx present in ancestor
76 """return file context that is the ancestor of fctx present in ancestor
77 manifest am
77 manifest am
78
78
79 Note: we used to try and stop after a given limit, however checking if that
79 Note: we used to try and stop after a given limit, however checking if that
80 limit is reached turned out to be very expensive. we are better off
80 limit is reached turned out to be very expensive. we are better off
81 disabling that feature."""
81 disabling that feature."""
82
82
83 for f in fctx.ancestors():
83 for f in fctx.ancestors():
84 path = f.path()
84 path = f.path()
85 if am.get(path, None) == f.filenode():
85 if am.get(path, None) == f.filenode():
86 return path
86 return path
87 if basemf and basemf.get(path, None) == f.filenode():
87 if basemf and basemf.get(path, None) == f.filenode():
88 return path
88 return path
89
89
90
90
91 def _dirstatecopies(repo, match=None):
91 def _dirstatecopies(repo, match=None):
92 ds = repo.dirstate
92 ds = repo.dirstate
93 c = ds.copies().copy()
93 c = ds.copies().copy()
94 for k in list(c):
94 for k in list(c):
95 if ds[k] not in b'anm' or (match and not match(k)):
95 if ds[k] not in b'anm' or (match and not match(k)):
96 del c[k]
96 del c[k]
97 return c
97 return c
98
98
99
99
100 def _computeforwardmissing(a, b, match=None):
100 def _computeforwardmissing(a, b, match=None):
101 """Computes which files are in b but not a.
101 """Computes which files are in b but not a.
102 This is its own function so extensions can easily wrap this call to see what
102 This is its own function so extensions can easily wrap this call to see what
103 files _forwardcopies is about to process.
103 files _forwardcopies is about to process.
104 """
104 """
105 ma = a.manifest()
105 ma = a.manifest()
106 mb = b.manifest()
106 mb = b.manifest()
107 return mb.filesnotin(ma, match=match)
107 return mb.filesnotin(ma, match=match)
108
108
109
109
def usechangesetcentricalgo(repo):
    """Return True when changeset-centric copy algorithms should be used."""
    # sidedata storage always implies changeset-centric copy data
    if repo.filecopiesmode == b'changeset-sidedata':
        return True
    # otherwise honor the experimental read-from configuration knob
    source = repo.ui.config(b'experimental', b'copies.read-from')
    return source in (b'changeset-only', b'compatibility')
117
117
118
118
def _committedforwardcopies(a, b, base, match):
    """Like _forwardcopies(), but b.rev() cannot be None (working copy)"""
    # files might have to be traced back to the fctx parent of the last
    # one-side-only changeset, but not further back than that
    repo = a._repo

    if usechangesetcentricalgo(repo):
        return _changesetforwardcopies(a, b, match)

    trace = repo.ui.debugflag and repo.ui.configbool(b'devel', b'debug.copies')
    say = repo.ui.debug
    if trace:
        say(b'debug.copies: looking into rename from %s to %s\n' % (a, b))
    ancestor_mf = a.manifest()
    basemf = None if base is None else base.manifest()

    # We only look for files that appeared in 'b'; finding where old files
    # went is too expensive, which means a case like 'hg rm b; hg cp a b'
    # can be missed.
    copies = {}

    # Comparing entire manifests is costly. In the common case of computing
    # the copies between a commit and its single parent (rebase, histedit),
    # b.files() already narrows down the candidates, so intersect with it.
    # Merge commits are excluded because their ctx.files() is not reliable
    # for this comparison.
    forwardmissingmatch = match
    if b.p1() == a and b.p2().node() == node.nullid:
        filesmatcher = matchmod.exact(b.files())
        forwardmissingmatch = matchmod.intersectmatchers(match, filesmatcher)
    missing = _computeforwardmissing(a, b, match=forwardmissingmatch)

    ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)

    if trace:
        say(b'debug.copies: missing files to search: %d\n' % len(missing))

    for f in sorted(missing):
        if trace:
            say(b'debug.copies: tracing file: %s\n' % f)
        fctx = b[f]
        # share one ancestry context across all traced files
        fctx._ancestrycontext = ancestrycontext

        if trace:
            start = util.timer()
        opath = _tracefile(fctx, ancestor_mf, basemf)
        if opath:
            if trace:
                say(b'debug.copies: rename of: %s\n' % opath)
            copies[f] = opath
        if trace:
            say(
                b'debug.copies: time: %f seconds\n'
                % (util.timer() - start)
            )
    return copies
176
176
177
177
178 def _revinfogetter(repo):
178 def _revinfogetter(repo):
179 """return a function that return multiple data given a <rev>"i
179 """return a function that return multiple data given a <rev>"i
180
180
181 * p1: revision number of first parent
181 * p1: revision number of first parent
182 * p2: revision number of first parent
182 * p2: revision number of first parent
183 * p1copies: mapping of copies from p1
183 * p1copies: mapping of copies from p1
184 * p2copies: mapping of copies from p2
184 * p2copies: mapping of copies from p2
185 * removed: a list of removed files
185 * removed: a list of removed files
186 """
186 """
187 cl = repo.changelog
187 cl = repo.changelog
188 parents = cl.parentrevs
188 parents = cl.parentrevs
189
189
190 if repo.filecopiesmode == b'changeset-sidedata':
190 if repo.filecopiesmode == b'changeset-sidedata':
191 changelogrevision = cl.changelogrevision
191 changelogrevision = cl.changelogrevision
192 flags = cl.flags
192 flags = cl.flags
193
193
194 # A small cache to avoid doing the work twice for merges
194 # A small cache to avoid doing the work twice for merges
195 #
195 #
196 # In the vast majority of cases, if we ask information for a revision
196 # In the vast majority of cases, if we ask information for a revision
197 # about 1 parent, we'll later ask it for the other. So it make sense to
197 # about 1 parent, we'll later ask it for the other. So it make sense to
198 # keep the information around when reaching the first parent of a merge
198 # keep the information around when reaching the first parent of a merge
199 # and dropping it after it was provided for the second parents.
199 # and dropping it after it was provided for the second parents.
200 #
200 #
201 # It exists cases were only one parent of the merge will be walked. It
201 # It exists cases were only one parent of the merge will be walked. It
202 # happens when the "destination" the copy tracing is descendant from a
202 # happens when the "destination" the copy tracing is descendant from a
203 # new root, not common with the "source". In that case, we will only walk
203 # new root, not common with the "source". In that case, we will only walk
204 # through merge parents that are descendant of changesets common
204 # through merge parents that are descendant of changesets common
205 # between "source" and "destination".
205 # between "source" and "destination".
206 #
206 #
207 # With the current case implementation if such changesets have a copy
207 # With the current case implementation if such changesets have a copy
208 # information, we'll keep them in memory until the end of
208 # information, we'll keep them in memory until the end of
209 # _changesetforwardcopies. We don't expect the case to be frequent
209 # _changesetforwardcopies. We don't expect the case to be frequent
210 # enough to matters.
210 # enough to matters.
211 #
211 #
212 # In addition, it would be possible to reach pathological case, were
212 # In addition, it would be possible to reach pathological case, were
213 # many first parent are met before any second parent is reached. In
213 # many first parent are met before any second parent is reached. In
214 # that case the cache could grow. If this even become an issue one can
214 # that case the cache could grow. If this even become an issue one can
215 # safely introduce a maximum cache size. This would trade extra CPU/IO
215 # safely introduce a maximum cache size. This would trade extra CPU/IO
216 # time to save memory.
216 # time to save memory.
217 merge_caches = {}
217 merge_caches = {}
218
218
219 def revinfo(rev):
219 def revinfo(rev):
220 p1, p2 = parents(rev)
220 p1, p2 = parents(rev)
221 if flags(rev) & REVIDX_SIDEDATA:
221 if flags(rev) & REVIDX_SIDEDATA:
222 e = merge_caches.pop(rev, None)
222 e = merge_caches.pop(rev, None)
223 if e is not None:
223 if e is not None:
224 return e
224 return e
225 c = changelogrevision(rev)
225 c = changelogrevision(rev)
226 p1copies = c.p1copies
226 p1copies = c.p1copies
227 p2copies = c.p2copies
227 p2copies = c.p2copies
228 removed = c.filesremoved
228 removed = c.filesremoved
229 if p1 != node.nullrev and p2 != node.nullrev:
229 if p1 != node.nullrev and p2 != node.nullrev:
230 # XXX some case we over cache, IGNORE
230 # XXX some case we over cache, IGNORE
231 merge_caches[rev] = (p1, p2, p1copies, p2copies, removed)
231 merge_caches[rev] = (p1, p2, p1copies, p2copies, removed)
232 else:
232 else:
233 p1copies = {}
233 p1copies = {}
234 p2copies = {}
234 p2copies = {}
235 removed = []
235 removed = []
236 return p1, p2, p1copies, p2copies, removed
236 return p1, p2, p1copies, p2copies, removed
237
237
238 else:
238 else:
239
239
240 def revinfo(rev):
240 def revinfo(rev):
241 p1, p2 = parents(rev)
241 p1, p2 = parents(rev)
242 ctx = repo[rev]
242 ctx = repo[rev]
243 p1copies, p2copies = ctx._copies
243 p1copies, p2copies = ctx._copies
244 removed = ctx.filesremoved()
244 removed = ctx.filesremoved()
245 return p1, p2, p1copies, p2copies, removed
245 return p1, p2, p1copies, p2copies, removed
246
246
247 return revinfo
247 return revinfo
248
248
249
249
def _changesetforwardcopies(a, b, match):
    """changeset-centric computation of the {dst@b: src@a} copy mapping"""
    if a.rev() in (node.nullrev, b.rev()):
        return {}

    repo = a.repo().unfiltered()
    revinfo = _revinfogetter(repo)

    cl = repo.changelog
    missingrevs = cl.findmissingrevs(common=[a.rev()], heads=[b.rev()])
    mrset = set(missingrevs)
    children = {}
    roots = set()
    for rev in missingrevs:
        for parent in cl.parentrevs(rev):
            if parent == node.nullrev:
                continue
            children.setdefault(parent, []).append(rev)
            # a parent outside the missing set is a candidate root
            if parent not in mrset:
                roots.add(parent)
    if not roots:
        # no common revision to track copies from
        return {}
    min_root = min(roots)

    from_head = set(
        cl.reachableroots(min_root, [b.rev()], list(roots), includepath=True)
    )

    # only visit revisions lying on a path from a root to b, plus the
    # roots themselves, excluding b itself
    iterrevs = from_head & mrset
    iterrevs.update(roots)
    iterrevs.remove(b.rev())
    revs = sorted(iterrevs)
    return _combinechangesetcopies(revs, children, b.rev(), revinfo, match)
287
287
288
288
def _combinechangesetcopies(revs, children, targetrev, revinfo, match):
    """combine the copies information for each item of revs

    revs: sorted iterable of revisions to visit
    children: a {parent: [children]} mapping.
    targetrev: the final copies destination revision (not in revs)
    revinfo(rev): a function that returns (p1, p2, p1copies, p2copies, removed)
    match: a matcher

    It returns the aggregated copies information for `targetrev`.
    """
    all_copies = {}
    alwaysmatch = match.always()
    for r in revs:
        copies = all_copies.pop(r, None)
        if copies is None:
            # this is a root
            copies = {}
        for c in children[r]:
            p1, p2, p1copies, p2copies, removed = revinfo(c)
            if r == p1:
                parent = 1
                childcopies = p1copies
            else:
                assert r == p2
                parent = 2
                childcopies = p2copies
            if not alwaysmatch:
                childcopies = {
                    dst: src for dst, src in childcopies.items() if match(dst)
                }
            newcopies = copies
            if childcopies:
                newcopies = _chain(newcopies, childcopies)
                # _chain makes a copy, so we could avoid doing so in some
                # simple/linear cases.
                assert newcopies is not copies
            for f in removed:
                if f in newcopies:
                    if newcopies is copies:
                        # copy on write to avoid affecting potential other
                        # branches. when there are no other branches, this
                        # could be avoided.
                        newcopies = copies.copy()
                    del newcopies[f]
            othercopies = all_copies.get(c)
            if othercopies is None:
                all_copies[c] = newcopies
            else:
                # we are the second parent to work on c, we need to merge our
                # work with the other.
                #
                # Unlike when copies are stored in the filelog, we consider
                # it a copy even if the destination already existed on the
                # other branch. It's simply too expensive to check if the
                # file existed in the manifest.
                #
                # In case of conflict, parent 1 takes precedence over parent
                # 2. This is an arbitrary choice made anew when implementing
                # changeset based copies. It was made without regard to
                # potential filelog related behavior.
                if parent == 1:
                    othercopies.update(newcopies)
                else:
                    newcopies.update(othercopies)
                    all_copies[c] = newcopies
    return all_copies[targetrev]
356
356
357
357
def _forwardcopies(a, b, base=None, match=None):
    """find {dst@b: src@a} copy mapping where a is an ancestor of b"""

    if base is None:
        base = a
    match = a.repo().narrowmatch(match)
    # b.rev() is None for the working copy: trace committed copies up to
    # its first parent, then layer the dirstate copies on top
    if b.rev() is None:
        committed = _committedforwardcopies(a, b.p1(), base, match)
        return _chain(committed, _dirstatecopies(b._repo, match))
    return _committedforwardcopies(a, b, base, match)
372
372
373
373
def _backwardrenames(a, b, match):
    """find renames going backwards from b to a, as a {src@b: dst@a} mapping"""
    if a._repo.ui.config(b'experimental', b'copytrace') == b'off':
        return {}

    # Even though we're not taking copies into account, 1:n rename situations
    # can still exist (e.g. hg cp a b; hg mv a c). In those cases we
    # arbitrarily pick one of the renames.
    # We don't want to pass in "match" here, since that would filter
    # the destination by it. Since we're reversing the copies, we want
    # to filter the source instead.
    forward = _forwardcopies(b, a)
    renames = {}
    for dst, src in sorted(pycompat.iteritems(forward)):
        if match and not match(src):
            continue
        # files still present in a are copies, not renames: drop them
        if src in a:
            continue
        renames[src] = dst
    return renames
394
394
395
395
def pathcopies(x, y, match=None):
    """find {dst@y: src@x} copy mapping for directed compare"""
    repo = x._repo
    debug = repo.ui.debugflag and repo.ui.configbool(b'devel', b'debug.copies')
    if debug:
        repo.ui.debug(
            b'debug.copies: searching copies from %s to %s\n' % (x, y)
        )
    # trivial cases: same context or an empty endpoint
    if x == y or not x or not y:
        return {}
    anc = y.ancestor(x)
    if anc == x:
        # x is an ancestor of y: walk forward only
        if debug:
            repo.ui.debug(b'debug.copies: search mode: forward\n')
        if y.rev() is None and x == y.p1():
            # short-circuit to avoid issues with merge states
            return _dirstatecopies(repo, match)
        copies = _forwardcopies(x, y, match=match)
    elif anc == y:
        # y is an ancestor of x: walk backward only
        if debug:
            repo.ui.debug(b'debug.copies: search mode: backward\n')
        copies = _backwardrenames(x, y, match=match)
    else:
        # unrelated branches: go back to the common ancestor, then forward
        if debug:
            repo.ui.debug(b'debug.copies: search mode: combined\n')
        base = x if anc.rev() != node.nullrev else None
        copies = _chain(
            _backwardrenames(x, anc, match=match),
            _forwardcopies(anc, y, base, match=match),
        )
    _filter(x, y, copies)
    return copies
430
430
431
431
def mergecopies(repo, c1, c2, base):
    """
    Find moves and copies between context c1 and c2 that are relevant for
    merging. 'base' will be used as the merge base.

    Copytracing is used in commands like rebase, merge, unshelve, etc to merge
    files that were moved/copied in one merge parent and modified in another.
    For example:

    o          ---> 4 another commit
    |
    |   o      ---> 3 commit that modifies a.txt
    |  /
    o /        ---> 2 commit that moves a.txt to b.txt
    |/
    o          ---> 1 merge base

    If we try to rebase revision 3 on revision 4, since there is no a.txt in
    revision 4, and if the user has copytracing disabled, we print the
    following message:

    ```other changed <file> which local deleted```

    Returns a tuple where:

    "branch_copies" is an instance of branch_copies.

    "diverge" is a mapping of source name -> list of destination names
    for divergent renames.

    This function calls different copytracing algorithms based on config.
    """
    # avoid silly behavior for update from empty dir
    if not c1 or not c2 or c1 == c2:
        return branch_copies(), {}

    narrowmatch = c1.repo().narrowmatch()

    # avoid silly behavior for parent -> working dir
    if c2.node() is None and c1.node() == repo.dirstate.p1():
        return branch_copies(_dirstatecopies(repo, narrowmatch)), {}

    copytracing = repo.ui.config(b'experimental', b'copytrace')
    if stringutil.parsebool(copytracing) is False:
        # stringutil.parsebool() returns None when it is unable to parse the
        # value, so we should rely on making sure copytracing is on such cases
        return branch_copies(), {}

    if usechangesetcentricalgo(repo):
        # The heuristics don't make sense when we need changeset-centric algos
        return _fullcopytracing(repo, c1, c2, base)

    # Copy trace disabling is explicitly below the node == p1 logic above
    # because the logic above is required for a simple copy to be kept across a
    # rebase.
    if copytracing == b'heuristics':
        # Do full copytracing if only non-public revisions are involved as
        # that will be fast enough and will also cover the copies which could
        # be missed by heuristics
        if _isfullcopytraceable(repo, c1, base):
            return _fullcopytracing(repo, c1, c2, base)
        return _heuristicscopytracing(repo, c1, c2, base)
    else:
        return _fullcopytracing(repo, c1, c2, base)
510
496
511
497
def _isfullcopytraceable(repo, c1, base):
    """Report whether the full copytrace algorithm is affordable here.

    Full copytracing is worthwhile when base, source and destination are
    all non-public (mutable) and the span of changesets between base and
    c1 stays under `experimental.copytrace.sourcecommitlimit`.
    """
    if c1.rev() is None:
        # working directory context: measure from its first parent
        c1 = c1.p1()
    if not (c1.mutable() and base.mutable()):
        return False
    limit = repo.ui.configint(
        b'experimental', b'copytrace.sourcecommitlimit'
    )
    nbcommits = len(repo.revs(b'%d::%d', base.rev(), c1.rev()))
    return nbcommits < limit
530
516
531
517
532 def _checksinglesidecopies(
518 def _checksinglesidecopies(
533 src, dsts1, m1, m2, mb, c2, base, copy, renamedelete
519 src, dsts1, m1, m2, mb, c2, base, copy, renamedelete
534 ):
520 ):
535 if src not in m2:
521 if src not in m2:
536 # deleted on side 2
522 # deleted on side 2
537 if src not in m1:
523 if src not in m1:
538 # renamed on side 1, deleted on side 2
524 # renamed on side 1, deleted on side 2
539 renamedelete[src] = dsts1
525 renamedelete[src] = dsts1
540 elif m2[src] != mb[src]:
526 elif m2[src] != mb[src]:
541 if not _related(c2[src], base[src]):
527 if not _related(c2[src], base[src]):
542 return
528 return
543 # modified on side 2
529 # modified on side 2
544 for dst in dsts1:
530 for dst in dsts1:
545 if dst not in m2:
531 if dst not in m2:
546 # dst not added on side 2 (handle as regular
532 # dst not added on side 2 (handle as regular
547 # "both created" case in manifestmerge otherwise)
533 # "both created" case in manifestmerge otherwise)
548 copy[dst] = src
534 copy[dst] = src
549
535
550
536
class branch_copies(object):
    """Information about copies made on one side of a merge/graft.

    "copy" is a mapping from destination name -> source name,
    where source is in c1 and destination is in c2 or vice-versa.

    "movewithdir" is a mapping from source name -> destination name,
    where the file at source present in one context but not the other
    needs to be moved to destination by the merge process, because the
    other context moved the directory it is in.

    "renamedelete" is a mapping of source name -> list of destination
    names for files deleted in c1 that were renamed in c2 or vice-versa.

    "dirmove" is a mapping of detected source dir -> destination dir renames.
    This is needed for handling changes to new files previously grafted into
    renamed directories.
    """

    def __init__(
        self, copy=None, renamedelete=None, dirmove=None, movewithdir=None
    ):
        # Build fresh dicts for missing arguments so instances never
        # share mutable default state.
        self.copy = {} if copy is None else copy
        self.renamedelete = {} if renamedelete is None else renamedelete
        self.dirmove = {} if dirmove is None else dirmove
        self.movewithdir = {} if movewithdir is None else movewithdir

    def __repr__(self):
        # Debugging aid: show all four mappings at once.
        return (
            '<branch_copies\n  copy=%r\n  renamedelete=%r\n  dirmove=%r\n'
            '  movewithdir=%r\n>'
            % (self.copy, self.renamedelete, self.dirmove, self.movewithdir)
        )
563
564
def _fullcopytracing(repo, c1, c2, base):
    """ The full copytracing algorithm which finds all the new files that were
    added from merge base up to the top commit and for each file it checks if
    this file was copied from another file.

    This is pretty slow when a lot of changesets are involved but will track all
    the copies.

    Returns a (branch_copies, diverge) pair, where `diverge` maps a source
    name to the sorted list of destinations it diverged into.
    """
    m1 = c1.manifest()
    m2 = c2.manifest()
    mb = base.manifest()

    copies1 = pathcopies(base, c1)
    copies2 = pathcopies(base, c2)

    if not (copies1 or copies2):
        # no copies on either side: nothing to reconcile
        return branch_copies(), {}

    # invert dst->src into src->[dsts] so each source can be classified once
    inversecopies1 = {}
    inversecopies2 = {}
    for dst, src in copies1.items():
        inversecopies1.setdefault(src, []).append(dst)
    for dst, src in copies2.items():
        inversecopies2.setdefault(src, []).append(dst)

    copy1 = {}
    copy2 = {}
    diverge = {}
    renamedelete1 = {}
    renamedelete2 = {}
    allsources = set(inversecopies1) | set(inversecopies2)
    for src in allsources:
        dsts1 = inversecopies1.get(src)
        dsts2 = inversecopies2.get(src)
        if dsts1 and dsts2:
            # copied/renamed on both sides
            if src not in m1 and src not in m2:
                # renamed on both sides
                dsts1 = set(dsts1)
                dsts2 = set(dsts2)
                # If there's some overlap in the rename destinations, we
                # consider it not divergent. For example, if side 1 copies 'a'
                # to 'b' and 'c' and deletes 'a', and side 2 copies 'a' to 'c'
                # and 'd' and deletes 'a'.
                if dsts1 & dsts2:
                    for dst in dsts1 & dsts2:
                        copy1[dst] = src
                        copy2[dst] = src
                else:
                    diverge[src] = sorted(dsts1 | dsts2)
            elif src in m1 and src in m2:
                # copied on both sides
                dsts1 = set(dsts1)
                dsts2 = set(dsts2)
                for dst in dsts1 & dsts2:
                    copy1[dst] = src
                    copy2[dst] = src
            # TODO: Handle cases where it was renamed on one side and copied
            # on the other side
        elif dsts1:
            # copied/renamed only on side 1
            _checksinglesidecopies(
                src, dsts1, m1, m2, mb, c2, base, copy1, renamedelete1
            )
        elif dsts2:
            # copied/renamed only on side 2
            _checksinglesidecopies(
                src, dsts2, m2, m1, mb, c1, base, copy2, renamedelete2
            )

    # find interesting file sets from manifests
    addedinm1 = m1.filesnotin(mb, repo.narrowmatch())
    addedinm2 = m2.filesnotin(mb, repo.narrowmatch())
    u1 = sorted(addedinm1 - addedinm2)
    u2 = sorted(addedinm2 - addedinm1)

    header = b"  unmatched files in %s"
    if u1:
        repo.ui.debug(b"%s:\n   %s\n" % (header % b'local', b"\n   ".join(u1)))
    if u2:
        repo.ui.debug(b"%s:\n   %s\n" % (header % b'other', b"\n   ".join(u2)))

    if repo.ui.debugflag:
        # debug-only summary sets; built lazily to avoid cost otherwise
        renamedeleteset = set()
        divergeset = set()
        for dsts in diverge.values():
            divergeset.update(dsts)
        for dsts in renamedelete1.values():
            renamedeleteset.update(dsts)
        for dsts in renamedelete2.values():
            renamedeleteset.update(dsts)

        repo.ui.debug(
            b"  all copies found (* = to merge, ! = divergent, "
            b"% = renamed and deleted):\n"
        )
        for side, copies in ((b"local", copies1), (b"remote", copies2)):
            if not copies:
                continue
            repo.ui.debug(b"  on %s side:\n" % side)
            for f in sorted(copies):
                note = b""
                if f in copy1 or f in copy2:
                    note += b"*"
                if f in divergeset:
                    note += b"!"
                if f in renamedeleteset:
                    note += b"%"
                repo.ui.debug(
                    b"   src: '%s' -> dst: '%s' %s\n" % (copies[f], f, note)
                )
        del renamedeleteset
        del divergeset

    repo.ui.debug(b"  checking for directory renames\n")

    dirmove1, movewithdir2 = _dir_renames(repo, c1, copy1, copies1, u2)
    dirmove2, movewithdir1 = _dir_renames(repo, c2, copy2, copies2, u1)

    # merge the two sides' results into the "1" dicts before returning
    copy1.update(copy2)
    renamedelete1.update(renamedelete2)
    movewithdir1.update(movewithdir2)
    dirmove1.update(dirmove2)

    return branch_copies(copy1, renamedelete1, dirmove1, movewithdir1), diverge
676
690
677
691
def _dir_renames(repo, ctx, copy, fullcopy, addedfiles):
    """Finds moved directories and files that should move with them.

    ctx: the context for one of the sides
    copy: files copied on the same side (as ctx)
    fullcopy: files copied on the same side (as ctx), including those that
    merge.manifestmerge() won't care about
    addedfiles: added files on the other side (compared to ctx)

    Returns a (dirmove, movewithdir) pair of dicts.
    """
    # generate a directory move map
    d = ctx.dirs()
    invalid = set()
    dirmove = {}

    # examine each file copy for a potential directory move, which is
    # when all the files in a directory are moved to a new directory
    for dst, src in pycompat.iteritems(fullcopy):
        dsrc, ddst = pathutil.dirname(src), pathutil.dirname(dst)
        if dsrc in invalid:
            # already seen to be uninteresting
            continue
        elif dsrc in d and ddst in d:
            # directory wasn't entirely moved locally
            invalid.add(dsrc)
        elif dsrc in dirmove and dirmove[dsrc] != ddst:
            # files from the same directory moved to two different places
            invalid.add(dsrc)
        else:
            # looks good so far
            dirmove[dsrc] = ddst

    # drop every source directory that turned out to be ambiguous
    for i in invalid:
        if i in dirmove:
            del dirmove[i]
    del d, invalid

    if not dirmove:
        return {}, {}

    # add trailing slashes so prefix matching below cannot match partial names
    dirmove = {k + b"/": v + b"/" for k, v in pycompat.iteritems(dirmove)}

    for d in dirmove:
        repo.ui.debug(
            b"   discovered dir src: '%s' -> dst: '%s'\n" % (d, dirmove[d])
        )

    movewithdir = {}
    # check unaccounted nonoverlapping files against directory moves
    for f in addedfiles:
        if f not in fullcopy:
            for d in dirmove:
                if f.startswith(d):
                    # new file added in a directory that was moved, move it
                    df = dirmove[d] + f[len(d) :]
                    if df not in copy:
                        movewithdir[f] = df
                        repo.ui.debug(
                            b"   pending file src: '%s' -> dst: '%s'\n"
                            % (f, df)
                        )
                    break

    return dirmove, movewithdir
741
755
742
756
def _heuristicscopytracing(repo, c1, c2, base):
    """ Fast copytracing using filename heuristics

    Assumes that moves or renames are of following two types:

    1) Inside a directory only (same directory name but different filenames)
    2) Move from one directory to another
       (same filenames but different directory names)

    Works only when there are no merge commits in the "source branch".
    Source branch is commits from base up to c2 not including base.

    If merge is involved it fallbacks to _fullcopytracing().

    Can be used by setting the following config:

        [experimental]
        copytrace = heuristics

    In some cases the copy/move candidates found by heuristics can be very large
    in number and that will make the algorithm slow. The number of possible
    candidates to check can be limited by using the config
    `experimental.copytrace.movecandidateslimit` which defaults to 100.
    """

    # For working-directory contexts, operate on the first parent instead.
    if c1.rev() is None:
        c1 = c1.p1()
    if c2.rev() is None:
        c2 = c2.p1()

    copies = {}

    changedfiles = set()
    m1 = c1.manifest()
    if not repo.revs(b'%d::%d', base.rev(), c2.rev()):
        # If base is not in c2 branch, we switch to fullcopytracing
        repo.ui.debug(
            b"switching to full copytracing as base is not "
            b"an ancestor of c2\n"
        )
        return _fullcopytracing(repo, c1, c2, base)

    # walk base..c2 collecting changed files; bail out on any merge commit
    ctx = c2
    while ctx != base:
        if len(ctx.parents()) == 2:
            # To keep things simple let's not handle merges
            repo.ui.debug(b"switching to full copytracing because of merges\n")
            return _fullcopytracing(repo, c1, c2, base)
        changedfiles.update(ctx.files())
        ctx = ctx.p1()

    # keep only copies whose source still exists on the c1 side
    cp = _forwardcopies(base, c2)
    for dst, src in pycompat.iteritems(cp):
        if src in m1:
            copies[dst] = src

    # file is missing if it isn't present in the destination, but is present in
    # the base and present in the source.
    # Presence in the base is important to exclude added files, presence in the
    # source is important to exclude removed files.
    filt = lambda f: f not in m1 and f in base and f in c2
    missingfiles = [f for f in changedfiles if filt(f)]

    if missingfiles:
        # index the files added on the c1 side by basename and by dirname,
        # matching the two rename shapes described in the docstring
        basenametofilename = collections.defaultdict(list)
        dirnametofilename = collections.defaultdict(list)

        for f in m1.filesnotin(base.manifest()):
            basename = os.path.basename(f)
            dirname = os.path.dirname(f)
            basenametofilename[basename].append(f)
            dirnametofilename[dirname].append(f)

        for f in missingfiles:
            basename = os.path.basename(f)
            dirname = os.path.dirname(f)
            samebasename = basenametofilename[basename]
            samedirname = dirnametofilename[dirname]
            movecandidates = samebasename + samedirname
            # f is guaranteed to be present in c2, that's why
            # c2.filectx(f) won't fail
            f2 = c2.filectx(f)
            # we can have a lot of candidates which can slow down the heuristics
            # config value to limit the number of candidates moves to check
            # NOTE(review): this config read is loop-invariant and could be
            # hoisted above the loop — confirm and move if desired.
            maxcandidates = repo.ui.configint(
                b'experimental', b'copytrace.movecandidateslimit'
            )

            if len(movecandidates) > maxcandidates:
                repo.ui.status(
                    _(
                        b"skipping copytracing for '%s', more "
                        b"candidates than the limit: %d\n"
                    )
                    % (f, len(movecandidates))
                )
                continue

            for candidate in movecandidates:
                f1 = c1.filectx(candidate)
                if _related(f1, f2):
                    # if there are a few related copies then we'll merge
                    # changes into all of them. This matches the behaviour
                    # of upstream copytracing
                    copies[candidate] = f

    return branch_copies(copies), {}
850
864
851
865
def _related(f1, f2):
    """return True if f1 and f2 filectx have a common ancestor

    Walk back to common ancestor to see if the two files originate
    from the same file. Since workingfilectx's rev() is None it messes
    up the integer comparison logic, hence the pre-step check for
    None (f1 and f2 can only be workingfilectx's initially).
    """

    if f1 == f2:
        return True  # a match

    g1, g2 = f1.ancestors(), f2.ancestors()
    try:
        f1r, f2r = f1.linkrev(), f2.linkrev()

        # step working-copy contexts (linkrev None) to their first ancestor
        # so the integer comparisons below are well defined
        if f1r is None:
            f1 = next(g1)
        if f2r is None:
            f2 = next(g2)

        # walk both ancestor chains in lockstep, always advancing the side
        # with the higher linkrev, until the linkrevs meet
        while True:
            f1r, f2r = f1.linkrev(), f2.linkrev()
            if f1r > f2r:
                f1 = next(g1)
            elif f2r > f1r:
                f2 = next(g2)
            else:  # f1 and f2 point to files in the same linkrev
                return f1 == f2  # true if they point to the same file
    except StopIteration:
        # one chain was exhausted before the linkrevs met: unrelated
        return False
896 return False
883
897
884
898
def graftcopies(wctx, ctx, base):
    """reproduce copies between base and ctx in the wctx

    Unlike mergecopies(), this function will only consider copies between base
    and ctx; it will ignore copies between base and wctx. Also unlike
    mergecopies(), this function will apply copies to the working copy (instead
    of just returning information about the copies). That makes it cheaper
    (especially in the common case of base==ctx.p1()) and useful also when
    experimental.copytrace=off.

    merge.update() will have already marked most copies, but it will only
    mark copies if it thinks the source files are related (see
    merge._related()). It will also not mark copies if the file wasn't modified
    on the local side. This function adds the copies that were "missed"
    by merge.update().
    """
    new_copies = pathcopies(base, ctx)
    # drop copies that are invalid relative to the working copy's parent
    _filter(wctx.p1(), wctx, new_copies)
    for dst, src in pycompat.iteritems(new_copies):
        # record each surviving copy directly in the working copy
        wctx[dst].markcopied(src)
905
919
906
920
def computechangesetfilesadded(ctx):
    """Return the list of files added in a changeset.

    A file counts as added when none of the changeset's parents contain it.
    """
    return [
        f for f in ctx.files() if not any(f in p for p in ctx.parents())
    ]
915
929
916
930
def computechangesetfilesremoved(ctx):
    """Return the list of files removed in a changeset.

    A touched file that is no longer present in the changeset is removed.
    """
    return [f for f in ctx.files() if f not in ctx]
925
939
926
940
def computechangesetcopies(ctx):
    """return the copies data for a changeset

    The copies data are returned as a pair of dictionaries
    (p1copies, p2copies).

    Each dictionary is in the form: `{newname: oldname}`
    """
    p1copies = {}
    p2copies = {}
    p1 = ctx.p1()
    p2 = ctx.p2()
    narrowmatch = ctx._repo.narrowmatch()
    for dst in ctx.files():
        # skip files outside the narrowspec and files removed in this commit
        if not narrowmatch(dst) or dst not in ctx:
            continue
        copied = ctx[dst].renamed()
        if not copied:
            continue
        src, srcnode = copied
        # attribute the copy to whichever parent holds the matching filenode
        if src in p1 and p1[src].filenode() == srcnode:
            p1copies[dst] = src
        elif src in p2 and p2[src].filenode() == srcnode:
            p2copies[dst] = src
    return p1copies, p2copies
951
965
952
966
def encodecopies(files, copies):
    """Encode `copies` ({dst: src}) against the `files` list.

    Each copy becomes "<index-of-dst>\\0<src>"; entries are joined with
    newlines. Raises ProgrammingError when a copy destination is missing
    from `files`.
    """
    entries = [
        b'%d\0%s' % (idx, copies[dst])
        for idx, dst in enumerate(files)
        if dst in copies
    ]
    if len(entries) != len(copies):
        raise error.ProgrammingError(
            b'some copy targets missing from file list'
        )
    return b"\n".join(entries)
963
977
964
978
def decodecopies(files, data):
    """Decode a blob produced by encodecopies() into {dst: src}.

    Returns None when the data does not parse; someone may have stored a
    differently-formatted value under the same sidedata/extras key.
    """
    if not data:
        return {}
    result = {}
    try:
        for line in data.split(b'\n'):
            rawindex, src = line.split(b'\0')
            result[files[int(rawindex)]] = src
    except (ValueError, IndexError):
        # Perhaps someone had chosen the same key name (e.g. "p1copies") and
        # used different syntax for the value.
        return None
    return result
980
994
981
995
def encodefileindices(files, subset):
    """Encode the members of `subset` as newline-joined indices into `files`."""
    wanted = set(subset)
    return b'\n'.join(
        b'%d' % idx for idx, name in enumerate(files) if name in wanted
    )
989
1003
990
1004
def decodefileindices(files, data):
    """Decode output of encodefileindices() back into a list of files.

    Returns None for out-of-range or unparsable input; someone may have
    stored a differently-formatted value under the same key.
    """
    if not data:
        return []
    selected = []
    try:
        for token in data.split(b'\n'):
            idx = int(token)
            if not (0 <= idx < len(files)):
                return None
            selected.append(files[idx])
    except (ValueError, IndexError):
        # Perhaps someone had chosen the same key name (e.g. "added") and
        # used different syntax for the value.
        return None
    return selected
1006
1020
1007
1021
1008 def _getsidedata(srcrepo, rev):
1022 def _getsidedata(srcrepo, rev):
1009 ctx = srcrepo[rev]
1023 ctx = srcrepo[rev]
1010 filescopies = computechangesetcopies(ctx)
1024 filescopies = computechangesetcopies(ctx)
1011 filesadded = computechangesetfilesadded(ctx)
1025 filesadded = computechangesetfilesadded(ctx)
1012 filesremoved = computechangesetfilesremoved(ctx)
1026 filesremoved = computechangesetfilesremoved(ctx)
1013 sidedata = {}
1027 sidedata = {}
1014 if any([filescopies, filesadded, filesremoved]):
1028 if any([filescopies, filesadded, filesremoved]):
1015 sortedfiles = sorted(ctx.files())
1029 sortedfiles = sorted(ctx.files())
1016 p1copies, p2copies = filescopies
1030 p1copies, p2copies = filescopies
1017 p1copies = encodecopies(sortedfiles, p1copies)
1031 p1copies = encodecopies(sortedfiles, p1copies)
1018 p2copies = encodecopies(sortedfiles, p2copies)
1032 p2copies = encodecopies(sortedfiles, p2copies)
1019 filesadded = encodefileindices(sortedfiles, filesadded)
1033 filesadded = encodefileindices(sortedfiles, filesadded)
1020 filesremoved = encodefileindices(sortedfiles, filesremoved)
1034 filesremoved = encodefileindices(sortedfiles, filesremoved)
1021 if p1copies:
1035 if p1copies:
1022 sidedata[sidedatamod.SD_P1COPIES] = p1copies
1036 sidedata[sidedatamod.SD_P1COPIES] = p1copies
1023 if p2copies:
1037 if p2copies:
1024 sidedata[sidedatamod.SD_P2COPIES] = p2copies
1038 sidedata[sidedatamod.SD_P2COPIES] = p2copies
1025 if filesadded:
1039 if filesadded:
1026 sidedata[sidedatamod.SD_FILESADDED] = filesadded
1040 sidedata[sidedatamod.SD_FILESADDED] = filesadded
1027 if filesremoved:
1041 if filesremoved:
1028 sidedata[sidedatamod.SD_FILESREMOVED] = filesremoved
1042 sidedata[sidedatamod.SD_FILESREMOVED] = filesremoved
1029 return sidedata
1043 return sidedata
1030
1044
1031
1045
1032 def getsidedataadder(srcrepo, destrepo):
1046 def getsidedataadder(srcrepo, destrepo):
1033 use_w = srcrepo.ui.configbool(b'experimental', b'worker.repository-upgrade')
1047 use_w = srcrepo.ui.configbool(b'experimental', b'worker.repository-upgrade')
1034 if pycompat.iswindows or not use_w:
1048 if pycompat.iswindows or not use_w:
1035 return _get_simple_sidedata_adder(srcrepo, destrepo)
1049 return _get_simple_sidedata_adder(srcrepo, destrepo)
1036 else:
1050 else:
1037 return _get_worker_sidedata_adder(srcrepo, destrepo)
1051 return _get_worker_sidedata_adder(srcrepo, destrepo)
1038
1052
1039
1053
1040 def _sidedata_worker(srcrepo, revs_queue, sidedata_queue, tokens):
1054 def _sidedata_worker(srcrepo, revs_queue, sidedata_queue, tokens):
1041 """The function used by worker precomputing sidedata
1055 """The function used by worker precomputing sidedata
1042
1056
1043 It read an input queue containing revision numbers
1057 It read an input queue containing revision numbers
1044 It write in an output queue containing (rev, <sidedata-map>)
1058 It write in an output queue containing (rev, <sidedata-map>)
1045
1059
1046 The `None` input value is used as a stop signal.
1060 The `None` input value is used as a stop signal.
1047
1061
1048 The `tokens` semaphore is user to avoid having too many unprocessed
1062 The `tokens` semaphore is user to avoid having too many unprocessed
1049 entries. The workers needs to acquire one token before fetching a task.
1063 entries. The workers needs to acquire one token before fetching a task.
1050 They will be released by the consumer of the produced data.
1064 They will be released by the consumer of the produced data.
1051 """
1065 """
1052 tokens.acquire()
1066 tokens.acquire()
1053 rev = revs_queue.get()
1067 rev = revs_queue.get()
1054 while rev is not None:
1068 while rev is not None:
1055 data = _getsidedata(srcrepo, rev)
1069 data = _getsidedata(srcrepo, rev)
1056 sidedata_queue.put((rev, data))
1070 sidedata_queue.put((rev, data))
1057 tokens.acquire()
1071 tokens.acquire()
1058 rev = revs_queue.get()
1072 rev = revs_queue.get()
1059 # processing of `None` is completed, release the token.
1073 # processing of `None` is completed, release the token.
1060 tokens.release()
1074 tokens.release()
1061
1075
1062
1076
1063 BUFF_PER_WORKER = 50
1077 BUFF_PER_WORKER = 50
1064
1078
1065
1079
1066 def _get_worker_sidedata_adder(srcrepo, destrepo):
1080 def _get_worker_sidedata_adder(srcrepo, destrepo):
1067 """The parallel version of the sidedata computation
1081 """The parallel version of the sidedata computation
1068
1082
1069 This code spawn a pool of worker that precompute a buffer of sidedata
1083 This code spawn a pool of worker that precompute a buffer of sidedata
1070 before we actually need them"""
1084 before we actually need them"""
1071 # avoid circular import copies -> scmutil -> worker -> copies
1085 # avoid circular import copies -> scmutil -> worker -> copies
1072 from . import worker
1086 from . import worker
1073
1087
1074 nbworkers = worker._numworkers(srcrepo.ui)
1088 nbworkers = worker._numworkers(srcrepo.ui)
1075
1089
1076 tokens = multiprocessing.BoundedSemaphore(nbworkers * BUFF_PER_WORKER)
1090 tokens = multiprocessing.BoundedSemaphore(nbworkers * BUFF_PER_WORKER)
1077 revsq = multiprocessing.Queue()
1091 revsq = multiprocessing.Queue()
1078 sidedataq = multiprocessing.Queue()
1092 sidedataq = multiprocessing.Queue()
1079
1093
1080 assert srcrepo.filtername is None
1094 assert srcrepo.filtername is None
1081 # queue all tasks beforehand, revision numbers are small and it make
1095 # queue all tasks beforehand, revision numbers are small and it make
1082 # synchronisation simpler
1096 # synchronisation simpler
1083 #
1097 #
1084 # Since the computation for each node can be quite expensive, the overhead
1098 # Since the computation for each node can be quite expensive, the overhead
1085 # of using a single queue is not revelant. In practice, most computation
1099 # of using a single queue is not revelant. In practice, most computation
1086 # are fast but some are very expensive and dominate all the other smaller
1100 # are fast but some are very expensive and dominate all the other smaller
1087 # cost.
1101 # cost.
1088 for r in srcrepo.changelog.revs():
1102 for r in srcrepo.changelog.revs():
1089 revsq.put(r)
1103 revsq.put(r)
1090 # queue the "no more tasks" markers
1104 # queue the "no more tasks" markers
1091 for i in range(nbworkers):
1105 for i in range(nbworkers):
1092 revsq.put(None)
1106 revsq.put(None)
1093
1107
1094 allworkers = []
1108 allworkers = []
1095 for i in range(nbworkers):
1109 for i in range(nbworkers):
1096 args = (srcrepo, revsq, sidedataq, tokens)
1110 args = (srcrepo, revsq, sidedataq, tokens)
1097 w = multiprocessing.Process(target=_sidedata_worker, args=args)
1111 w = multiprocessing.Process(target=_sidedata_worker, args=args)
1098 allworkers.append(w)
1112 allworkers.append(w)
1099 w.start()
1113 w.start()
1100
1114
1101 # dictionnary to store results for revision higher than we one we are
1115 # dictionnary to store results for revision higher than we one we are
1102 # looking for. For example, if we need the sidedatamap for 42, and 43 is
1116 # looking for. For example, if we need the sidedatamap for 42, and 43 is
1103 # received, when shelve 43 for later use.
1117 # received, when shelve 43 for later use.
1104 staging = {}
1118 staging = {}
1105
1119
1106 def sidedata_companion(revlog, rev):
1120 def sidedata_companion(revlog, rev):
1107 sidedata = {}
1121 sidedata = {}
1108 if util.safehasattr(revlog, b'filteredrevs'): # this is a changelog
1122 if util.safehasattr(revlog, b'filteredrevs'): # this is a changelog
1109 # Is the data previously shelved ?
1123 # Is the data previously shelved ?
1110 sidedata = staging.pop(rev, None)
1124 sidedata = staging.pop(rev, None)
1111 if sidedata is None:
1125 if sidedata is None:
1112 # look at the queued result until we find the one we are lookig
1126 # look at the queued result until we find the one we are lookig
1113 # for (shelve the other ones)
1127 # for (shelve the other ones)
1114 r, sidedata = sidedataq.get()
1128 r, sidedata = sidedataq.get()
1115 while r != rev:
1129 while r != rev:
1116 staging[r] = sidedata
1130 staging[r] = sidedata
1117 r, sidedata = sidedataq.get()
1131 r, sidedata = sidedataq.get()
1118 tokens.release()
1132 tokens.release()
1119 return False, (), sidedata
1133 return False, (), sidedata
1120
1134
1121 return sidedata_companion
1135 return sidedata_companion
1122
1136
1123
1137
1124 def _get_simple_sidedata_adder(srcrepo, destrepo):
1138 def _get_simple_sidedata_adder(srcrepo, destrepo):
1125 """The simple version of the sidedata computation
1139 """The simple version of the sidedata computation
1126
1140
1127 It just compute it in the same thread on request"""
1141 It just compute it in the same thread on request"""
1128
1142
1129 def sidedatacompanion(revlog, rev):
1143 def sidedatacompanion(revlog, rev):
1130 sidedata = {}
1144 sidedata = {}
1131 if util.safehasattr(revlog, 'filteredrevs'): # this is a changelog
1145 if util.safehasattr(revlog, 'filteredrevs'): # this is a changelog
1132 sidedata = _getsidedata(srcrepo, rev)
1146 sidedata = _getsidedata(srcrepo, rev)
1133 return False, (), sidedata
1147 return False, (), sidedata
1134
1148
1135 return sidedatacompanion
1149 return sidedatacompanion
1136
1150
1137
1151
1138 def getsidedataremover(srcrepo, destrepo):
1152 def getsidedataremover(srcrepo, destrepo):
1139 def sidedatacompanion(revlog, rev):
1153 def sidedatacompanion(revlog, rev):
1140 f = ()
1154 f = ()
1141 if util.safehasattr(revlog, 'filteredrevs'): # this is a changelog
1155 if util.safehasattr(revlog, 'filteredrevs'): # this is a changelog
1142 if revlog.flags(rev) & REVIDX_SIDEDATA:
1156 if revlog.flags(rev) & REVIDX_SIDEDATA:
1143 f = (
1157 f = (
1144 sidedatamod.SD_P1COPIES,
1158 sidedatamod.SD_P1COPIES,
1145 sidedatamod.SD_P2COPIES,
1159 sidedatamod.SD_P2COPIES,
1146 sidedatamod.SD_FILESADDED,
1160 sidedatamod.SD_FILESADDED,
1147 sidedatamod.SD_FILESREMOVED,
1161 sidedatamod.SD_FILESREMOVED,
1148 )
1162 )
1149 return False, f, {}
1163 return False, f, {}
1150
1164
1151 return sidedatacompanion
1165 return sidedatacompanion
@@ -1,2708 +1,2711 b''
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import shutil
11 import shutil
12 import stat
12 import stat
13 import struct
13 import struct
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 bin,
18 bin,
19 hex,
19 hex,
20 modifiednodeid,
20 modifiednodeid,
21 nullhex,
21 nullhex,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 )
24 )
25 from .pycompat import delattr
25 from .pycompat import delattr
26 from .thirdparty import attr
26 from .thirdparty import attr
27 from . import (
27 from . import (
28 copies,
28 copies,
29 encoding,
29 encoding,
30 error,
30 error,
31 filemerge,
31 filemerge,
32 match as matchmod,
32 match as matchmod,
33 obsutil,
33 obsutil,
34 pathutil,
34 pathutil,
35 pycompat,
35 pycompat,
36 scmutil,
36 scmutil,
37 subrepoutil,
37 subrepoutil,
38 util,
38 util,
39 worker,
39 worker,
40 )
40 )
41 from .utils import hashutil
41 from .utils import hashutil
42
42
43 _pack = struct.pack
43 _pack = struct.pack
44 _unpack = struct.unpack
44 _unpack = struct.unpack
45
45
46
46
47 def _droponode(data):
47 def _droponode(data):
48 # used for compatibility for v1
48 # used for compatibility for v1
49 bits = data.split(b'\0')
49 bits = data.split(b'\0')
50 bits = bits[:-2] + bits[-1:]
50 bits = bits[:-2] + bits[-1:]
51 return b'\0'.join(bits)
51 return b'\0'.join(bits)
52
52
53
53
54 # Merge state record types. See ``mergestate`` docs for more.
54 # Merge state record types. See ``mergestate`` docs for more.
55 RECORD_LOCAL = b'L'
55 RECORD_LOCAL = b'L'
56 RECORD_OTHER = b'O'
56 RECORD_OTHER = b'O'
57 RECORD_MERGED = b'F'
57 RECORD_MERGED = b'F'
58 RECORD_CHANGEDELETE_CONFLICT = b'C'
58 RECORD_CHANGEDELETE_CONFLICT = b'C'
59 RECORD_MERGE_DRIVER_MERGE = b'D'
59 RECORD_MERGE_DRIVER_MERGE = b'D'
60 RECORD_PATH_CONFLICT = b'P'
60 RECORD_PATH_CONFLICT = b'P'
61 RECORD_MERGE_DRIVER_STATE = b'm'
61 RECORD_MERGE_DRIVER_STATE = b'm'
62 RECORD_FILE_VALUES = b'f'
62 RECORD_FILE_VALUES = b'f'
63 RECORD_LABELS = b'l'
63 RECORD_LABELS = b'l'
64 RECORD_OVERRIDE = b't'
64 RECORD_OVERRIDE = b't'
65 RECORD_UNSUPPORTED_MANDATORY = b'X'
65 RECORD_UNSUPPORTED_MANDATORY = b'X'
66 RECORD_UNSUPPORTED_ADVISORY = b'x'
66 RECORD_UNSUPPORTED_ADVISORY = b'x'
67
67
68 MERGE_DRIVER_STATE_UNMARKED = b'u'
68 MERGE_DRIVER_STATE_UNMARKED = b'u'
69 MERGE_DRIVER_STATE_MARKED = b'm'
69 MERGE_DRIVER_STATE_MARKED = b'm'
70 MERGE_DRIVER_STATE_SUCCESS = b's'
70 MERGE_DRIVER_STATE_SUCCESS = b's'
71
71
72 MERGE_RECORD_UNRESOLVED = b'u'
72 MERGE_RECORD_UNRESOLVED = b'u'
73 MERGE_RECORD_RESOLVED = b'r'
73 MERGE_RECORD_RESOLVED = b'r'
74 MERGE_RECORD_UNRESOLVED_PATH = b'pu'
74 MERGE_RECORD_UNRESOLVED_PATH = b'pu'
75 MERGE_RECORD_RESOLVED_PATH = b'pr'
75 MERGE_RECORD_RESOLVED_PATH = b'pr'
76 MERGE_RECORD_DRIVER_RESOLVED = b'd'
76 MERGE_RECORD_DRIVER_RESOLVED = b'd'
77
77
78 ACTION_FORGET = b'f'
78 ACTION_FORGET = b'f'
79 ACTION_REMOVE = b'r'
79 ACTION_REMOVE = b'r'
80 ACTION_ADD = b'a'
80 ACTION_ADD = b'a'
81 ACTION_GET = b'g'
81 ACTION_GET = b'g'
82 ACTION_PATH_CONFLICT = b'p'
82 ACTION_PATH_CONFLICT = b'p'
83 ACTION_PATH_CONFLICT_RESOLVE = b'pr'
83 ACTION_PATH_CONFLICT_RESOLVE = b'pr'
84 ACTION_ADD_MODIFIED = b'am'
84 ACTION_ADD_MODIFIED = b'am'
85 ACTION_CREATED = b'c'
85 ACTION_CREATED = b'c'
86 ACTION_DELETED_CHANGED = b'dc'
86 ACTION_DELETED_CHANGED = b'dc'
87 ACTION_CHANGED_DELETED = b'cd'
87 ACTION_CHANGED_DELETED = b'cd'
88 ACTION_MERGE = b'm'
88 ACTION_MERGE = b'm'
89 ACTION_LOCAL_DIR_RENAME_GET = b'dg'
89 ACTION_LOCAL_DIR_RENAME_GET = b'dg'
90 ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
90 ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
91 ACTION_KEEP = b'k'
91 ACTION_KEEP = b'k'
92 ACTION_EXEC = b'e'
92 ACTION_EXEC = b'e'
93 ACTION_CREATED_MERGE = b'cm'
93 ACTION_CREATED_MERGE = b'cm'
94
94
95
95
96 class mergestate(object):
96 class mergestate(object):
97 '''track 3-way merge state of individual files
97 '''track 3-way merge state of individual files
98
98
99 The merge state is stored on disk when needed. Two files are used: one with
99 The merge state is stored on disk when needed. Two files are used: one with
100 an old format (version 1), and one with a new format (version 2). Version 2
100 an old format (version 1), and one with a new format (version 2). Version 2
101 stores a superset of the data in version 1, including new kinds of records
101 stores a superset of the data in version 1, including new kinds of records
102 in the future. For more about the new format, see the documentation for
102 in the future. For more about the new format, see the documentation for
103 `_readrecordsv2`.
103 `_readrecordsv2`.
104
104
105 Each record can contain arbitrary content, and has an associated type. This
105 Each record can contain arbitrary content, and has an associated type. This
106 `type` should be a letter. If `type` is uppercase, the record is mandatory:
106 `type` should be a letter. If `type` is uppercase, the record is mandatory:
107 versions of Mercurial that don't support it should abort. If `type` is
107 versions of Mercurial that don't support it should abort. If `type` is
108 lowercase, the record can be safely ignored.
108 lowercase, the record can be safely ignored.
109
109
110 Currently known records:
110 Currently known records:
111
111
112 L: the node of the "local" part of the merge (hexified version)
112 L: the node of the "local" part of the merge (hexified version)
113 O: the node of the "other" part of the merge (hexified version)
113 O: the node of the "other" part of the merge (hexified version)
114 F: a file to be merged entry
114 F: a file to be merged entry
115 C: a change/delete or delete/change conflict
115 C: a change/delete or delete/change conflict
116 D: a file that the external merge driver will merge internally
116 D: a file that the external merge driver will merge internally
117 (experimental)
117 (experimental)
118 P: a path conflict (file vs directory)
118 P: a path conflict (file vs directory)
119 m: the external merge driver defined for this merge plus its run state
119 m: the external merge driver defined for this merge plus its run state
120 (experimental)
120 (experimental)
121 f: a (filename, dictionary) tuple of optional values for a given file
121 f: a (filename, dictionary) tuple of optional values for a given file
122 X: unsupported mandatory record type (used in tests)
122 X: unsupported mandatory record type (used in tests)
123 x: unsupported advisory record type (used in tests)
123 x: unsupported advisory record type (used in tests)
124 l: the labels for the parts of the merge.
124 l: the labels for the parts of the merge.
125
125
126 Merge driver run states (experimental):
126 Merge driver run states (experimental):
127 u: driver-resolved files unmarked -- needs to be run next time we're about
127 u: driver-resolved files unmarked -- needs to be run next time we're about
128 to resolve or commit
128 to resolve or commit
129 m: driver-resolved files marked -- only needs to be run before commit
129 m: driver-resolved files marked -- only needs to be run before commit
130 s: success/skipped -- does not need to be run any more
130 s: success/skipped -- does not need to be run any more
131
131
132 Merge record states (stored in self._state, indexed by filename):
132 Merge record states (stored in self._state, indexed by filename):
133 u: unresolved conflict
133 u: unresolved conflict
134 r: resolved conflict
134 r: resolved conflict
135 pu: unresolved path conflict (file conflicts with directory)
135 pu: unresolved path conflict (file conflicts with directory)
136 pr: resolved path conflict
136 pr: resolved path conflict
137 d: driver-resolved conflict
137 d: driver-resolved conflict
138
138
139 The resolve command transitions between 'u' and 'r' for conflicts and
139 The resolve command transitions between 'u' and 'r' for conflicts and
140 'pu' and 'pr' for path conflicts.
140 'pu' and 'pr' for path conflicts.
141 '''
141 '''
142
142
143 statepathv1 = b'merge/state'
143 statepathv1 = b'merge/state'
144 statepathv2 = b'merge/state2'
144 statepathv2 = b'merge/state2'
145
145
146 @staticmethod
146 @staticmethod
147 def clean(repo, node=None, other=None, labels=None):
147 def clean(repo, node=None, other=None, labels=None):
148 """Initialize a brand new merge state, removing any existing state on
148 """Initialize a brand new merge state, removing any existing state on
149 disk."""
149 disk."""
150 ms = mergestate(repo)
150 ms = mergestate(repo)
151 ms.reset(node, other, labels)
151 ms.reset(node, other, labels)
152 return ms
152 return ms
153
153
154 @staticmethod
154 @staticmethod
155 def read(repo):
155 def read(repo):
156 """Initialize the merge state, reading it from disk."""
156 """Initialize the merge state, reading it from disk."""
157 ms = mergestate(repo)
157 ms = mergestate(repo)
158 ms._read()
158 ms._read()
159 return ms
159 return ms
160
160
161 def __init__(self, repo):
161 def __init__(self, repo):
162 """Initialize the merge state.
162 """Initialize the merge state.
163
163
164 Do not use this directly! Instead call read() or clean()."""
164 Do not use this directly! Instead call read() or clean()."""
165 self._repo = repo
165 self._repo = repo
166 self._dirty = False
166 self._dirty = False
167 self._labels = None
167 self._labels = None
168
168
169 def reset(self, node=None, other=None, labels=None):
169 def reset(self, node=None, other=None, labels=None):
170 self._state = {}
170 self._state = {}
171 self._stateextras = {}
171 self._stateextras = {}
172 self._local = None
172 self._local = None
173 self._other = None
173 self._other = None
174 self._labels = labels
174 self._labels = labels
175 for var in ('localctx', 'otherctx'):
175 for var in ('localctx', 'otherctx'):
176 if var in vars(self):
176 if var in vars(self):
177 delattr(self, var)
177 delattr(self, var)
178 if node:
178 if node:
179 self._local = node
179 self._local = node
180 self._other = other
180 self._other = other
181 self._readmergedriver = None
181 self._readmergedriver = None
182 if self.mergedriver:
182 if self.mergedriver:
183 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
183 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
184 else:
184 else:
185 self._mdstate = MERGE_DRIVER_STATE_UNMARKED
185 self._mdstate = MERGE_DRIVER_STATE_UNMARKED
186 shutil.rmtree(self._repo.vfs.join(b'merge'), True)
186 shutil.rmtree(self._repo.vfs.join(b'merge'), True)
187 self._results = {}
187 self._results = {}
188 self._dirty = False
188 self._dirty = False
189
189
190 def _read(self):
190 def _read(self):
191 """Analyse each record content to restore a serialized state from disk
191 """Analyse each record content to restore a serialized state from disk
192
192
193 This function process "record" entry produced by the de-serialization
193 This function process "record" entry produced by the de-serialization
194 of on disk file.
194 of on disk file.
195 """
195 """
196 self._state = {}
196 self._state = {}
197 self._stateextras = {}
197 self._stateextras = {}
198 self._local = None
198 self._local = None
199 self._other = None
199 self._other = None
200 for var in ('localctx', 'otherctx'):
200 for var in ('localctx', 'otherctx'):
201 if var in vars(self):
201 if var in vars(self):
202 delattr(self, var)
202 delattr(self, var)
203 self._readmergedriver = None
203 self._readmergedriver = None
204 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
204 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
205 unsupported = set()
205 unsupported = set()
206 records = self._readrecords()
206 records = self._readrecords()
207 for rtype, record in records:
207 for rtype, record in records:
208 if rtype == RECORD_LOCAL:
208 if rtype == RECORD_LOCAL:
209 self._local = bin(record)
209 self._local = bin(record)
210 elif rtype == RECORD_OTHER:
210 elif rtype == RECORD_OTHER:
211 self._other = bin(record)
211 self._other = bin(record)
212 elif rtype == RECORD_MERGE_DRIVER_STATE:
212 elif rtype == RECORD_MERGE_DRIVER_STATE:
213 bits = record.split(b'\0', 1)
213 bits = record.split(b'\0', 1)
214 mdstate = bits[1]
214 mdstate = bits[1]
215 if len(mdstate) != 1 or mdstate not in (
215 if len(mdstate) != 1 or mdstate not in (
216 MERGE_DRIVER_STATE_UNMARKED,
216 MERGE_DRIVER_STATE_UNMARKED,
217 MERGE_DRIVER_STATE_MARKED,
217 MERGE_DRIVER_STATE_MARKED,
218 MERGE_DRIVER_STATE_SUCCESS,
218 MERGE_DRIVER_STATE_SUCCESS,
219 ):
219 ):
220 # the merge driver should be idempotent, so just rerun it
220 # the merge driver should be idempotent, so just rerun it
221 mdstate = MERGE_DRIVER_STATE_UNMARKED
221 mdstate = MERGE_DRIVER_STATE_UNMARKED
222
222
223 self._readmergedriver = bits[0]
223 self._readmergedriver = bits[0]
224 self._mdstate = mdstate
224 self._mdstate = mdstate
225 elif rtype in (
225 elif rtype in (
226 RECORD_MERGED,
226 RECORD_MERGED,
227 RECORD_CHANGEDELETE_CONFLICT,
227 RECORD_CHANGEDELETE_CONFLICT,
228 RECORD_PATH_CONFLICT,
228 RECORD_PATH_CONFLICT,
229 RECORD_MERGE_DRIVER_MERGE,
229 RECORD_MERGE_DRIVER_MERGE,
230 ):
230 ):
231 bits = record.split(b'\0')
231 bits = record.split(b'\0')
232 self._state[bits[0]] = bits[1:]
232 self._state[bits[0]] = bits[1:]
233 elif rtype == RECORD_FILE_VALUES:
233 elif rtype == RECORD_FILE_VALUES:
234 filename, rawextras = record.split(b'\0', 1)
234 filename, rawextras = record.split(b'\0', 1)
235 extraparts = rawextras.split(b'\0')
235 extraparts = rawextras.split(b'\0')
236 extras = {}
236 extras = {}
237 i = 0
237 i = 0
238 while i < len(extraparts):
238 while i < len(extraparts):
239 extras[extraparts[i]] = extraparts[i + 1]
239 extras[extraparts[i]] = extraparts[i + 1]
240 i += 2
240 i += 2
241
241
242 self._stateextras[filename] = extras
242 self._stateextras[filename] = extras
243 elif rtype == RECORD_LABELS:
243 elif rtype == RECORD_LABELS:
244 labels = record.split(b'\0', 2)
244 labels = record.split(b'\0', 2)
245 self._labels = [l for l in labels if len(l) > 0]
245 self._labels = [l for l in labels if len(l) > 0]
246 elif not rtype.islower():
246 elif not rtype.islower():
247 unsupported.add(rtype)
247 unsupported.add(rtype)
248 self._results = {}
248 self._results = {}
249 self._dirty = False
249 self._dirty = False
250
250
251 if unsupported:
251 if unsupported:
252 raise error.UnsupportedMergeRecords(unsupported)
252 raise error.UnsupportedMergeRecords(unsupported)
253
253
254 def _readrecords(self):
254 def _readrecords(self):
255 """Read merge state from disk and return a list of record (TYPE, data)
255 """Read merge state from disk and return a list of record (TYPE, data)
256
256
257 We read data from both v1 and v2 files and decide which one to use.
257 We read data from both v1 and v2 files and decide which one to use.
258
258
259 V1 has been used by version prior to 2.9.1 and contains less data than
259 V1 has been used by version prior to 2.9.1 and contains less data than
260 v2. We read both versions and check if no data in v2 contradicts
260 v2. We read both versions and check if no data in v2 contradicts
261 v1. If there is not contradiction we can safely assume that both v1
261 v1. If there is not contradiction we can safely assume that both v1
262 and v2 were written at the same time and use the extract data in v2. If
262 and v2 were written at the same time and use the extract data in v2. If
263 there is contradiction we ignore v2 content as we assume an old version
263 there is contradiction we ignore v2 content as we assume an old version
264 of Mercurial has overwritten the mergestate file and left an old v2
264 of Mercurial has overwritten the mergestate file and left an old v2
265 file around.
265 file around.
266
266
267 returns list of record [(TYPE, data), ...]"""
267 returns list of record [(TYPE, data), ...]"""
268 v1records = self._readrecordsv1()
268 v1records = self._readrecordsv1()
269 v2records = self._readrecordsv2()
269 v2records = self._readrecordsv2()
270 if self._v1v2match(v1records, v2records):
270 if self._v1v2match(v1records, v2records):
271 return v2records
271 return v2records
272 else:
272 else:
273 # v1 file is newer than v2 file, use it
273 # v1 file is newer than v2 file, use it
274 # we have to infer the "other" changeset of the merge
274 # we have to infer the "other" changeset of the merge
275 # we cannot do better than that with v1 of the format
275 # we cannot do better than that with v1 of the format
276 mctx = self._repo[None].parents()[-1]
276 mctx = self._repo[None].parents()[-1]
277 v1records.append((RECORD_OTHER, mctx.hex()))
277 v1records.append((RECORD_OTHER, mctx.hex()))
278 # add place holder "other" file node information
278 # add place holder "other" file node information
279 # nobody is using it yet so we do no need to fetch the data
279 # nobody is using it yet so we do no need to fetch the data
280 # if mctx was wrong `mctx[bits[-2]]` may fails.
280 # if mctx was wrong `mctx[bits[-2]]` may fails.
281 for idx, r in enumerate(v1records):
281 for idx, r in enumerate(v1records):
282 if r[0] == RECORD_MERGED:
282 if r[0] == RECORD_MERGED:
283 bits = r[1].split(b'\0')
283 bits = r[1].split(b'\0')
284 bits.insert(-2, b'')
284 bits.insert(-2, b'')
285 v1records[idx] = (r[0], b'\0'.join(bits))
285 v1records[idx] = (r[0], b'\0'.join(bits))
286 return v1records
286 return v1records
287
287
288 def _v1v2match(self, v1records, v2records):
288 def _v1v2match(self, v1records, v2records):
289 oldv2 = set() # old format version of v2 record
289 oldv2 = set() # old format version of v2 record
290 for rec in v2records:
290 for rec in v2records:
291 if rec[0] == RECORD_LOCAL:
291 if rec[0] == RECORD_LOCAL:
292 oldv2.add(rec)
292 oldv2.add(rec)
293 elif rec[0] == RECORD_MERGED:
293 elif rec[0] == RECORD_MERGED:
294 # drop the onode data (not contained in v1)
294 # drop the onode data (not contained in v1)
295 oldv2.add((RECORD_MERGED, _droponode(rec[1])))
295 oldv2.add((RECORD_MERGED, _droponode(rec[1])))
296 for rec in v1records:
296 for rec in v1records:
297 if rec not in oldv2:
297 if rec not in oldv2:
298 return False
298 return False
299 else:
299 else:
300 return True
300 return True
301
301
302 def _readrecordsv1(self):
302 def _readrecordsv1(self):
303 """read on disk merge state for version 1 file
303 """read on disk merge state for version 1 file
304
304
305 returns list of record [(TYPE, data), ...]
305 returns list of record [(TYPE, data), ...]
306
306
307 Note: the "F" data from this file are one entry short
307 Note: the "F" data from this file are one entry short
308 (no "other file node" entry)
308 (no "other file node" entry)
309 """
309 """
310 records = []
310 records = []
311 try:
311 try:
312 f = self._repo.vfs(self.statepathv1)
312 f = self._repo.vfs(self.statepathv1)
313 for i, l in enumerate(f):
313 for i, l in enumerate(f):
314 if i == 0:
314 if i == 0:
315 records.append((RECORD_LOCAL, l[:-1]))
315 records.append((RECORD_LOCAL, l[:-1]))
316 else:
316 else:
317 records.append((RECORD_MERGED, l[:-1]))
317 records.append((RECORD_MERGED, l[:-1]))
318 f.close()
318 f.close()
319 except IOError as err:
319 except IOError as err:
320 if err.errno != errno.ENOENT:
320 if err.errno != errno.ENOENT:
321 raise
321 raise
322 return records
322 return records
323
323
    def _readrecordsv2(self):
        """read on disk merge state for version 2 file

        This format is a list of arbitrary records of the form:

          [type][length][content]

        `type` is a single character, `length` is a 4 byte integer, and
        `content` is an arbitrary byte sequence of length `length`.

        Mercurial versions prior to 3.7 have a bug where if there are
        unsupported mandatory merge records, attempting to clear out the merge
        state with hg update --clean or similar aborts. The 't' record type
        works around that by writing out what those versions treat as an
        advisory record, but later versions interpret as special: the first
        character is the 'real' record type and everything onwards is the data.

        Returns list of records [(TYPE, data), ...]."""
        records = []
        try:
            f = self._repo.vfs(self.statepathv2)
            data = f.read()
            off = 0
            end = len(data)
            while off < end:
                # record layout: 1-byte type, 4-byte big-endian length, payload
                rtype = data[off : off + 1]
                off += 1
                length = _unpack(b'>I', data[off : (off + 4)])[0]
                off += 4
                record = data[off : (off + length)]
                off += length
                if rtype == RECORD_OVERRIDE:
                    # 't' wrapper record: the first payload byte is the real
                    # record type, the rest is that record's data
                    rtype, record = record[0:1], record[1:]
                records.append((rtype, record))
            f.close()
        except IOError as err:
            # a missing v2 state file just means no records; anything else
            # is a real error
            if err.errno != errno.ENOENT:
                raise
        return records
363
363
364 @util.propertycache
364 @util.propertycache
365 def mergedriver(self):
365 def mergedriver(self):
366 # protect against the following:
366 # protect against the following:
367 # - A configures a malicious merge driver in their hgrc, then
367 # - A configures a malicious merge driver in their hgrc, then
368 # pauses the merge
368 # pauses the merge
369 # - A edits their hgrc to remove references to the merge driver
369 # - A edits their hgrc to remove references to the merge driver
370 # - A gives a copy of their entire repo, including .hg, to B
370 # - A gives a copy of their entire repo, including .hg, to B
371 # - B inspects .hgrc and finds it to be clean
371 # - B inspects .hgrc and finds it to be clean
372 # - B then continues the merge and the malicious merge driver
372 # - B then continues the merge and the malicious merge driver
373 # gets invoked
373 # gets invoked
374 configmergedriver = self._repo.ui.config(
374 configmergedriver = self._repo.ui.config(
375 b'experimental', b'mergedriver'
375 b'experimental', b'mergedriver'
376 )
376 )
377 if (
377 if (
378 self._readmergedriver is not None
378 self._readmergedriver is not None
379 and self._readmergedriver != configmergedriver
379 and self._readmergedriver != configmergedriver
380 ):
380 ):
381 raise error.ConfigError(
381 raise error.ConfigError(
382 _(b"merge driver changed since merge started"),
382 _(b"merge driver changed since merge started"),
383 hint=_(b"revert merge driver change or abort merge"),
383 hint=_(b"revert merge driver change or abort merge"),
384 )
384 )
385
385
386 return configmergedriver
386 return configmergedriver
387
387
388 @util.propertycache
388 @util.propertycache
389 def localctx(self):
389 def localctx(self):
390 if self._local is None:
390 if self._local is None:
391 msg = b"localctx accessed but self._local isn't set"
391 msg = b"localctx accessed but self._local isn't set"
392 raise error.ProgrammingError(msg)
392 raise error.ProgrammingError(msg)
393 return self._repo[self._local]
393 return self._repo[self._local]
394
394
395 @util.propertycache
395 @util.propertycache
396 def otherctx(self):
396 def otherctx(self):
397 if self._other is None:
397 if self._other is None:
398 msg = b"otherctx accessed but self._other isn't set"
398 msg = b"otherctx accessed but self._other isn't set"
399 raise error.ProgrammingError(msg)
399 raise error.ProgrammingError(msg)
400 return self._repo[self._other]
400 return self._repo[self._other]
401
401
402 def active(self):
402 def active(self):
403 """Whether mergestate is active.
403 """Whether mergestate is active.
404
404
405 Returns True if there appears to be mergestate. This is a rough proxy
405 Returns True if there appears to be mergestate. This is a rough proxy
406 for "is a merge in progress."
406 for "is a merge in progress."
407 """
407 """
408 # Check local variables before looking at filesystem for performance
408 # Check local variables before looking at filesystem for performance
409 # reasons.
409 # reasons.
410 return (
410 return (
411 bool(self._local)
411 bool(self._local)
412 or bool(self._state)
412 or bool(self._state)
413 or self._repo.vfs.exists(self.statepathv1)
413 or self._repo.vfs.exists(self.statepathv1)
414 or self._repo.vfs.exists(self.statepathv2)
414 or self._repo.vfs.exists(self.statepathv2)
415 )
415 )
416
416
417 def commit(self):
417 def commit(self):
418 """Write current state on disk (if necessary)"""
418 """Write current state on disk (if necessary)"""
419 if self._dirty:
419 if self._dirty:
420 records = self._makerecords()
420 records = self._makerecords()
421 self._writerecords(records)
421 self._writerecords(records)
422 self._dirty = False
422 self._dirty = False
423
423
    def _makerecords(self):
        """Build the list of (TYPE, data) records describing in-memory state.

        The first two records are always the local and other nodes; the rest
        encode the per-file state map, per-file extras, and labels.
        """
        records = []
        records.append((RECORD_LOCAL, hex(self._local)))
        records.append((RECORD_OTHER, hex(self._other)))
        if self.mergedriver:
            records.append(
                (
                    RECORD_MERGE_DRIVER_STATE,
                    b'\0'.join([self.mergedriver, self._mdstate]),
                )
            )
        # Write out state items. In all cases, the value of the state map entry
        # is written as the contents of the record. The record type depends on
        # the type of state that is stored, and capital-letter records are used
        # to prevent older versions of Mercurial that do not support the feature
        # from loading them.
        for filename, v in pycompat.iteritems(self._state):
            if v[0] == MERGE_RECORD_DRIVER_RESOLVED:
                # Driver-resolved merge. These are stored in 'D' records.
                records.append(
                    (RECORD_MERGE_DRIVER_MERGE, b'\0'.join([filename] + v))
                )
            elif v[0] in (
                MERGE_RECORD_UNRESOLVED_PATH,
                MERGE_RECORD_RESOLVED_PATH,
            ):
                # Path conflicts. These are stored in 'P' records. The current
                # resolution state ('pu' or 'pr') is stored within the record.
                records.append(
                    (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
                )
            elif v[1] == nullhex or v[6] == nullhex:
                # Change/Delete or Delete/Change conflicts. These are stored in
                # 'C' records. v[1] is the local file, and is nullhex when the
                # file is deleted locally ('dc'). v[6] is the remote file, and
                # is nullhex when the file is deleted remotely ('cd').
                records.append(
                    (RECORD_CHANGEDELETE_CONFLICT, b'\0'.join([filename] + v))
                )
            else:
                # Normal files. These are stored in 'F' records.
                records.append((RECORD_MERGED, b'\0'.join([filename] + v)))
        for filename, extras in sorted(pycompat.iteritems(self._stateextras)):
            # extras are flattened as key\0value pairs after the filename
            rawextras = b'\0'.join(
                b'%s\0%s' % (k, v) for k, v in pycompat.iteritems(extras)
            )
            records.append(
                (RECORD_FILE_VALUES, b'%s\0%s' % (filename, rawextras))
            )
        if self._labels is not None:
            labels = b'\0'.join(self._labels)
            records.append((RECORD_LABELS, labels))
        return records
477
477
478 def _writerecords(self, records):
478 def _writerecords(self, records):
479 """Write current state on disk (both v1 and v2)"""
479 """Write current state on disk (both v1 and v2)"""
480 self._writerecordsv1(records)
480 self._writerecordsv1(records)
481 self._writerecordsv2(records)
481 self._writerecordsv2(records)
482
482
483 def _writerecordsv1(self, records):
483 def _writerecordsv1(self, records):
484 """Write current state on disk in a version 1 file"""
484 """Write current state on disk in a version 1 file"""
485 f = self._repo.vfs(self.statepathv1, b'wb')
485 f = self._repo.vfs(self.statepathv1, b'wb')
486 irecords = iter(records)
486 irecords = iter(records)
487 lrecords = next(irecords)
487 lrecords = next(irecords)
488 assert lrecords[0] == RECORD_LOCAL
488 assert lrecords[0] == RECORD_LOCAL
489 f.write(hex(self._local) + b'\n')
489 f.write(hex(self._local) + b'\n')
490 for rtype, data in irecords:
490 for rtype, data in irecords:
491 if rtype == RECORD_MERGED:
491 if rtype == RECORD_MERGED:
492 f.write(b'%s\n' % _droponode(data))
492 f.write(b'%s\n' % _droponode(data))
493 f.close()
493 f.close()
494
494
495 def _writerecordsv2(self, records):
495 def _writerecordsv2(self, records):
496 """Write current state on disk in a version 2 file
496 """Write current state on disk in a version 2 file
497
497
498 See the docstring for _readrecordsv2 for why we use 't'."""
498 See the docstring for _readrecordsv2 for why we use 't'."""
499 # these are the records that all version 2 clients can read
499 # these are the records that all version 2 clients can read
500 allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
500 allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
501 f = self._repo.vfs(self.statepathv2, b'wb')
501 f = self._repo.vfs(self.statepathv2, b'wb')
502 for key, data in records:
502 for key, data in records:
503 assert len(key) == 1
503 assert len(key) == 1
504 if key not in allowlist:
504 if key not in allowlist:
505 key, data = RECORD_OVERRIDE, b'%s%s' % (key, data)
505 key, data = RECORD_OVERRIDE, b'%s%s' % (key, data)
506 format = b'>sI%is' % len(data)
506 format = b'>sI%is' % len(data)
507 f.write(_pack(format, key, len(data), data))
507 f.write(_pack(format, key, len(data), data))
508 f.close()
508 f.close()
509
509
510 @staticmethod
510 @staticmethod
511 def getlocalkey(path):
511 def getlocalkey(path):
512 """hash the path of a local file context for storage in the .hg/merge
512 """hash the path of a local file context for storage in the .hg/merge
513 directory."""
513 directory."""
514
514
515 return hex(hashutil.sha1(path).digest())
515 return hex(hashutil.sha1(path).digest())
516
516
    def add(self, fcl, fco, fca, fd):
        """add a new (potentially?) conflicting file the merge state
        fcl: file context for local,
        fco: file context for remote,
        fca: file context for ancestors,
        fd: file path of the resulting merge.

        note: also write the local version to the `.hg/merge` directory.
        """
        if fcl.isabsent():
            # local side has no content; nullhex marks "nothing to restore"
            localkey = nullhex
        else:
            localkey = mergestate.getlocalkey(fcl.path())
            # stash the local version so _resolve() can restore it later
            self._repo.vfs.write(b'merge/' + localkey, fcl.data())
        # NOTE: order is significant — _resolve() and _makerecords() unpack
        # this list positionally (indices 1 and 6 are checked against nullhex)
        self._state[fd] = [
            MERGE_RECORD_UNRESOLVED,
            localkey,
            fcl.path(),
            fca.path(),
            hex(fca.filenode()),
            fco.path(),
            hex(fco.filenode()),
            fcl.flags(),
        ]
        self._stateextras[fd] = {b'ancestorlinknode': hex(fca.node())}
        self._dirty = True
543
543
544 def addpath(self, path, frename, forigin):
544 def addpath(self, path, frename, forigin):
545 """add a new conflicting path to the merge state
545 """add a new conflicting path to the merge state
546 path: the path that conflicts
546 path: the path that conflicts
547 frename: the filename the conflicting file was renamed to
547 frename: the filename the conflicting file was renamed to
548 forigin: origin of the file ('l' or 'r' for local/remote)
548 forigin: origin of the file ('l' or 'r' for local/remote)
549 """
549 """
550 self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
550 self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
551 self._dirty = True
551 self._dirty = True
552
552
553 def __contains__(self, dfile):
553 def __contains__(self, dfile):
554 return dfile in self._state
554 return dfile in self._state
555
555
556 def __getitem__(self, dfile):
556 def __getitem__(self, dfile):
557 return self._state[dfile][0]
557 return self._state[dfile][0]
558
558
559 def __iter__(self):
559 def __iter__(self):
560 return iter(sorted(self._state))
560 return iter(sorted(self._state))
561
561
    def files(self):
        """Return the file paths with an entry in the merge state
        (the keys of the internal state map)."""
        return self._state.keys()
564
564
565 def mark(self, dfile, state):
565 def mark(self, dfile, state):
566 self._state[dfile][0] = state
566 self._state[dfile][0] = state
567 self._dirty = True
567 self._dirty = True
568
568
    def mdstate(self):
        """Return the merge driver state recorded for this merge."""
        return self._mdstate
571
571
572 def unresolved(self):
572 def unresolved(self):
573 """Obtain the paths of unresolved files."""
573 """Obtain the paths of unresolved files."""
574
574
575 for f, entry in pycompat.iteritems(self._state):
575 for f, entry in pycompat.iteritems(self._state):
576 if entry[0] in (
576 if entry[0] in (
577 MERGE_RECORD_UNRESOLVED,
577 MERGE_RECORD_UNRESOLVED,
578 MERGE_RECORD_UNRESOLVED_PATH,
578 MERGE_RECORD_UNRESOLVED_PATH,
579 ):
579 ):
580 yield f
580 yield f
581
581
582 def driverresolved(self):
582 def driverresolved(self):
583 """Obtain the paths of driver-resolved files."""
583 """Obtain the paths of driver-resolved files."""
584
584
585 for f, entry in self._state.items():
585 for f, entry in self._state.items():
586 if entry[0] == MERGE_RECORD_DRIVER_RESOLVED:
586 if entry[0] == MERGE_RECORD_DRIVER_RESOLVED:
587 yield f
587 yield f
588
588
589 def extras(self, filename):
589 def extras(self, filename):
590 return self._stateextras.setdefault(filename, {})
590 return self._stateextras.setdefault(filename, {})
591
591
    def _resolve(self, preresolve, dfile, wctx):
        """rerun merge process for file path `dfile`

        preresolve: True to run the premerge step, False for the full merge
        dfile: path of the file being merged
        wctx: working context to write results into

        Returns a (complete, exitcode) pair; files already resolved
        short-circuit with (True, 0).
        """
        if self[dfile] in (MERGE_RECORD_RESOLVED, MERGE_RECORD_DRIVER_RESOLVED):
            return True, 0
        stateentry = self._state[dfile]
        # positional unpack of the list built by add()
        state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
        octx = self._repo[self._other]
        extras = self.extras(dfile)
        anccommitnode = extras.get(b'ancestorlinknode')
        if anccommitnode:
            actx = self._repo[anccommitnode]
        else:
            actx = None
        fcd = self._filectxorabsent(localkey, wctx, dfile)
        fco = self._filectxorabsent(onode, octx, ofile)
        # TODO: move this to filectxorabsent
        fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
        # "premerge" x flags
        flo = fco.flags()
        fla = fca.flags()
        if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
            # exec bit involved but no symlink: try to merge the flag
            if fca.node() == nullid and flags != flo:
                if preresolve:
                    self._repo.ui.warn(
                        _(
                            b'warning: cannot merge flags for %s '
                            b'without common ancestor - keeping local flags\n'
                        )
                        % afile
                    )
            elif flags == fla:
                # local side unchanged from ancestor: take the other side's flag
                flags = flo
        if preresolve:
            # restore local
            if localkey != nullhex:
                # local content was stashed under .hg/merge by add()
                f = self._repo.vfs(b'merge/' + localkey)
                wctx[dfile].write(f.read(), flags)
                f.close()
            else:
                wctx[dfile].remove(ignoremissing=True)
            complete, r, deleted = filemerge.premerge(
                self._repo,
                wctx,
                self._local,
                lfile,
                fcd,
                fco,
                fca,
                labels=self._labels,
            )
        else:
            complete, r, deleted = filemerge.filemerge(
                self._repo,
                wctx,
                self._local,
                lfile,
                fcd,
                fco,
                fca,
                labels=self._labels,
            )
        if r is None:
            # no real conflict
            del self._state[dfile]
            self._stateextras.pop(dfile, None)
            self._dirty = True
        elif not r:
            self.mark(dfile, MERGE_RECORD_RESOLVED)

        if complete:
            # record the dirstate action implied by the merge outcome
            action = None
            if deleted:
                if fcd.isabsent():
                    # dc: local picked. Need to drop if present, which may
                    # happen on re-resolves.
                    action = ACTION_FORGET
                else:
                    # cd: remote picked (or otherwise deleted)
                    action = ACTION_REMOVE
            else:
                if fcd.isabsent():  # dc: remote picked
                    action = ACTION_GET
                elif fco.isabsent():  # cd: local picked
                    if dfile in self.localctx:
                        action = ACTION_ADD_MODIFIED
                    else:
                        action = ACTION_ADD
                # else: regular merges (no action necessary)
            self._results[dfile] = r, action

        return complete, r
683
683
684 def _filectxorabsent(self, hexnode, ctx, f):
684 def _filectxorabsent(self, hexnode, ctx, f):
685 if hexnode == nullhex:
685 if hexnode == nullhex:
686 return filemerge.absentfilectx(ctx, f)
686 return filemerge.absentfilectx(ctx, f)
687 else:
687 else:
688 return ctx[f]
688 return ctx[f]
689
689
690 def preresolve(self, dfile, wctx):
690 def preresolve(self, dfile, wctx):
691 """run premerge process for dfile
691 """run premerge process for dfile
692
692
693 Returns whether the merge is complete, and the exit code."""
693 Returns whether the merge is complete, and the exit code."""
694 return self._resolve(True, dfile, wctx)
694 return self._resolve(True, dfile, wctx)
695
695
696 def resolve(self, dfile, wctx):
696 def resolve(self, dfile, wctx):
697 """run merge process (assuming premerge was run) for dfile
697 """run merge process (assuming premerge was run) for dfile
698
698
699 Returns the exit code of the merge."""
699 Returns the exit code of the merge."""
700 return self._resolve(False, dfile, wctx)[1]
700 return self._resolve(False, dfile, wctx)[1]
701
701
702 def counts(self):
702 def counts(self):
703 """return counts for updated, merged and removed files in this
703 """return counts for updated, merged and removed files in this
704 session"""
704 session"""
705 updated, merged, removed = 0, 0, 0
705 updated, merged, removed = 0, 0, 0
706 for r, action in pycompat.itervalues(self._results):
706 for r, action in pycompat.itervalues(self._results):
707 if r is None:
707 if r is None:
708 updated += 1
708 updated += 1
709 elif r == 0:
709 elif r == 0:
710 if action == ACTION_REMOVE:
710 if action == ACTION_REMOVE:
711 removed += 1
711 removed += 1
712 else:
712 else:
713 merged += 1
713 merged += 1
714 return updated, merged, removed
714 return updated, merged, removed
715
715
716 def unresolvedcount(self):
716 def unresolvedcount(self):
717 """get unresolved count for this merge (persistent)"""
717 """get unresolved count for this merge (persistent)"""
718 return len(list(self.unresolved()))
718 return len(list(self.unresolved()))
719
719
720 def actions(self):
720 def actions(self):
721 """return lists of actions to perform on the dirstate"""
721 """return lists of actions to perform on the dirstate"""
722 actions = {
722 actions = {
723 ACTION_REMOVE: [],
723 ACTION_REMOVE: [],
724 ACTION_FORGET: [],
724 ACTION_FORGET: [],
725 ACTION_ADD: [],
725 ACTION_ADD: [],
726 ACTION_ADD_MODIFIED: [],
726 ACTION_ADD_MODIFIED: [],
727 ACTION_GET: [],
727 ACTION_GET: [],
728 }
728 }
729 for f, (r, action) in pycompat.iteritems(self._results):
729 for f, (r, action) in pycompat.iteritems(self._results):
730 if action is not None:
730 if action is not None:
731 actions[action].append((f, None, b"merge result"))
731 actions[action].append((f, None, b"merge result"))
732 return actions
732 return actions
733
733
734 def recordactions(self):
734 def recordactions(self):
735 """record remove/add/get actions in the dirstate"""
735 """record remove/add/get actions in the dirstate"""
736 branchmerge = self._repo.dirstate.p2() != nullid
736 branchmerge = self._repo.dirstate.p2() != nullid
737 recordupdates(self._repo, self.actions(), branchmerge, None)
737 recordupdates(self._repo, self.actions(), branchmerge, None)
738
738
739 def queueremove(self, f):
739 def queueremove(self, f):
740 """queues a file to be removed from the dirstate
740 """queues a file to be removed from the dirstate
741
741
742 Meant for use by custom merge drivers."""
742 Meant for use by custom merge drivers."""
743 self._results[f] = 0, ACTION_REMOVE
743 self._results[f] = 0, ACTION_REMOVE
744
744
745 def queueadd(self, f):
745 def queueadd(self, f):
746 """queues a file to be added to the dirstate
746 """queues a file to be added to the dirstate
747
747
748 Meant for use by custom merge drivers."""
748 Meant for use by custom merge drivers."""
749 self._results[f] = 0, ACTION_ADD
749 self._results[f] = 0, ACTION_ADD
750
750
751 def queueget(self, f):
751 def queueget(self, f):
752 """queues a file to be marked modified in the dirstate
752 """queues a file to be marked modified in the dirstate
753
753
754 Meant for use by custom merge drivers."""
754 Meant for use by custom merge drivers."""
755 self._results[f] = 0, ACTION_GET
755 self._results[f] = 0, ACTION_GET
756
756
757
757
def _getcheckunknownconfig(repo, section, name):
    """Read and validate the merge checkunknown/checkignored config value.

    Returns one of b'abort', b'ignore', b'warn'; raises ConfigError on any
    other value.
    """
    config = repo.ui.config(section, name)
    valid = [b'abort', b'ignore', b'warn']
    if config in valid:
        return config
    validstr = b', '.join([b"'" + v + b"'" for v in valid])
    raise error.ConfigError(
        _(b"%s.%s not valid ('%s' is none of %s)")
        % (section, name, config, validstr)
    )
768
768
769
769
def _checkunknownfile(repo, wctx, mctx, f, f2=None):
    """Return whether working-directory file `f` is an untracked file that
    differs in content from `f2` in `mctx` (defaults to `f`)."""
    if wctx.isinmemory():
        # Nothing to do in IMM because nothing in the "working copy" can be an
        # unknown file.
        #
        # Note that we should bail out here, not in ``_checkunknownfiles()``,
        # because that function does other useful work.
        return False

    if f2 is None:
        f2 = f
    if not repo.wvfs.audit.check(f):
        return False
    if not repo.wvfs.isfileorlink(f):
        return False
    if repo.dirstate.normalize(f) in repo.dirstate:
        # tracked files are not "unknown"
        return False
    return mctx[f2].cmp(wctx[f])
787
787
788
788
class _unknowndirschecker(object):
    """
    Look for any unknown files or directories that may have a path conflict
    with a file.  If any path prefix of the file exists as a file or link,
    then it conflicts.  If the file itself is a directory that contains any
    file that is not tracked, then it conflicts.

    Returns the shortest path at which a conflict occurs, or None if there is
    no conflict.
    """

    def __init__(self):
        # A set of paths known to be good. This prevents repeated checking of
        # dirs. It will be updated with any new dirs that are checked and found
        # to be safe.
        self._unknowndircache = set()

        # A set of paths that are known to be absent. This prevents repeated
        # checking of subdirectories that are known not to exist. It will be
        # updated with any new dirs that are checked and found to be absent.
        self._missingdircache = set()

    def __call__(self, repo, wctx, f):
        """Check `f`; return the shortest conflicting path, or None."""
        if wctx.isinmemory():
            # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
            return False

        # Check for path prefixes that exist as unknown files.
        for p in reversed(list(pathutil.finddirs(f))):
            if p in self._missingdircache:
                # a known-absent prefix means nothing below it can conflict
                return
            if p in self._unknowndircache:
                continue
            if repo.wvfs.audit.check(p):
                if (
                    repo.wvfs.isfileorlink(p)
                    and repo.dirstate.normalize(p) not in repo.dirstate
                ):
                    return p
                if not repo.wvfs.lexists(p):
                    self._missingdircache.add(p)
                    return
                self._unknowndircache.add(p)

        # Check if the file conflicts with a directory containing unknown files.
        if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
            # Does the directory contain any files that are not in the dirstate?
            for p, dirs, files in repo.wvfs.walk(f):
                for fn in files:
                    relf = util.pconvert(repo.wvfs.reljoin(p, fn))
                    relf = repo.dirstate.normalize(relf, isknown=True)
                    if relf not in repo.dirstate:
                        return f
        return None
843
843
844
844
def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.

    repo       - the local repository
    wctx       - the working context (local side of the merge)
    mctx       - the context being merged in (the remote side)
    force      - whether the operation was requested with force
    actions    - dict of file -> (action type, args, message); mutated in
                 place to substitute actions for conflicting files
    mergeforce - whether a forced merge (vs. forced update) was requested;
                 only consulted when force is true

    Raises error.Abort when any conflict is configured to abort.
    """
    fileconflicts = set()
    pathconflicts = set()
    warnconflicts = set()
    abortconflicts = set()
    unknownconfig = _getcheckunknownconfig(repo, b'merge', b'checkunknown')
    ignoredconfig = _getcheckunknownconfig(repo, b'merge', b'checkignored')
    pathconfig = repo.ui.configbool(
        b'experimental', b'merge.checkpathconflicts'
    )
    if not force:

        def collectconflicts(conflicts, config):
            # Route conflicts to the abort or warn bucket per configuration.
            if config == b'abort':
                abortconflicts.update(conflicts)
            elif config == b'warn':
                warnconflicts.update(conflicts)

        checkunknowndirs = _unknowndirschecker()
        for f, (m, args, msg) in pycompat.iteritems(actions):
            if m in (ACTION_CREATED, ACTION_DELETED_CHANGED):
                if _checkunknownfile(repo, wctx, mctx, f):
                    fileconflicts.add(f)
                elif pathconfig and f not in wctx:
                    path = checkunknowndirs(repo, wctx, f)
                    if path is not None:
                        pathconflicts.add(path)
            elif m == ACTION_LOCAL_DIR_RENAME_GET:
                if _checkunknownfile(repo, wctx, mctx, f, args[0]):
                    fileconflicts.add(f)

        allconflicts = fileconflicts | pathconflicts
        ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)}
        unknownconflicts = allconflicts - ignoredconflicts
        collectconflicts(ignoredconflicts, ignoredconfig)
        collectconflicts(unknownconflicts, unknownconfig)
    else:
        for f, (m, args, msg) in pycompat.iteritems(actions):
            if m == ACTION_CREATED_MERGE:
                fl2, anc = args
                different = _checkunknownfile(repo, wctx, mctx, f)
                if repo.dirstate._ignore(f):
                    config = ignoredconfig
                else:
                    config = unknownconfig

                # The behavior when force is True is described by this table:
                #  config  different  mergeforce  |    action    backup
                #    *         n          *       |      get        n
                #    *         y          y       |     merge       -
                #   abort      y          n       |     merge       -   (1)
                #   warn       y          n       |  warn + get     y
                #  ignore      y          n       |      get        y
                #
                # (1) this is probably the wrong behavior here -- we should
                #     probably abort, but some actions like rebases currently
                #     don't like an abort happening in the middle of
                #     merge.update.
                if not different:
                    actions[f] = (ACTION_GET, (fl2, False), b'remote created')
                elif mergeforce or config == b'abort':
                    # This branch also covers config == b'abort' (row (1) of
                    # the table): we merge rather than abort.  A subsequent
                    # 'elif config == b"abort"' branch was unreachable dead
                    # code and has been removed.
                    actions[f] = (
                        ACTION_MERGE,
                        (f, f, None, False, anc),
                        b'remote differs from untracked local',
                    )
                else:
                    if config == b'warn':
                        warnconflicts.add(f)
                    actions[f] = (ACTION_GET, (fl2, True), b'remote created')

    for f in sorted(abortconflicts):
        warn = repo.ui.warn
        if f in pathconflicts:
            if repo.wvfs.isfileorlink(f):
                warn(_(b"%s: untracked file conflicts with directory\n") % f)
            else:
                warn(_(b"%s: untracked directory conflicts with file\n") % f)
        else:
            warn(_(b"%s: untracked file differs\n") % f)
    if abortconflicts:
        raise error.Abort(
            _(
                b"untracked files in working directory "
                b"differ from files in requested revision"
            )
        )

    for f in sorted(warnconflicts):
        if repo.wvfs.isfileorlink(f):
            repo.ui.warn(_(b"%s: replacing untracked file\n") % f)
        else:
            repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)

    for f, (m, args, msg) in pycompat.iteritems(actions):
        if m == ACTION_CREATED:
            # Back up the unknown file when it conflicted directly, or via a
            # path conflict in one of its parent directories.
            backup = (
                f in fileconflicts
                or f in pathconflicts
                or any(p in pathconflicts for p in pathutil.finddirs(f))
            )
            (flags,) = args
            actions[f] = (ACTION_GET, (flags, backup), msg)
955
955
956
956
def _forgetremoved(wctx, mctx, branchmerge):
    """
    Forget removed files

    If we're jumping between revisions (as opposed to merging), and if
    neither the working directory nor the target rev has the file,
    then we need to remove it from the dirstate, to prevent the
    dirstate from listing the file when it is no longer in the
    manifest.

    If we're merging, and the other revision has removed a file
    that is not present in the working directory, we need to mark it
    as removed.
    """

    actions = {}
    # During a branch merge, a locally-deleted file that the other side
    # still removed must be recorded as removed; otherwise just forget it.
    deleted_action = ACTION_REMOVE if branchmerge else ACTION_FORGET
    for path in wctx.deleted():
        if path not in mctx:
            actions[path] = deleted_action, None, b"forget deleted"

    if not branchmerge:
        for path in wctx.removed():
            if path not in mctx:
                actions[path] = ACTION_FORGET, None, b"forget removed"

    return actions
986
986
987
987
def _checkcollision(repo, wmf, actions):
    """
    Check for case-folding collisions.

    repo    - the local repository (used for the narrowmatcher)
    wmf     - the working manifest to merge onto
    actions - dict keyed by ACTION_* type, each value a list of
              (file, args, msg) tuples (note: a different shape from the
              per-file actions dict used elsewhere in this module)

    Raises error.Abort on any file-vs-file or file-vs-directory collision
    under case folding; returns None otherwise.
    """

    # If the repo is narrowed, filter out files outside the narrowspec.
    narrowmatch = repo.narrowmatch()
    if not narrowmatch.always():
        wmf = wmf.matches(narrowmatch)
        if actions:
            narrowactions = {}
            for m, actionsfortype in pycompat.iteritems(actions):
                narrowactions[m] = []
                for (f, args, msg) in actionsfortype:
                    if narrowmatch(f):
                        narrowactions[m].append((f, args, msg))
            actions = narrowactions

    # build provisional merged manifest up
    pmmf = set(wmf)

    if actions:
        # KEEP and EXEC are no-op
        # Apply each action type's effect on the provisional file set:
        # additions first, then removals, then the rename/move variants
        # that both add and discard.
        for m in (
            ACTION_ADD,
            ACTION_ADD_MODIFIED,
            ACTION_FORGET,
            ACTION_GET,
            ACTION_CHANGED_DELETED,
            ACTION_DELETED_CHANGED,
        ):
            for f, args, msg in actions[m]:
                pmmf.add(f)
        for f, args, msg in actions[ACTION_REMOVE]:
            pmmf.discard(f)
        for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
            f2, flags = args
            pmmf.discard(f2)
            pmmf.add(f)
        for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
            pmmf.add(f)
        for f, args, msg in actions[ACTION_MERGE]:
            f1, f2, fa, move, anc = args
            if move:
                pmmf.discard(f1)
            pmmf.add(f)

    # check case-folding collision in provisional merged manifest
    foldmap = {}
    for f in pmmf:
        fold = util.normcase(f)
        if fold in foldmap:
            raise error.Abort(
                _(b"case-folding collision between %s and %s")
                % (f, foldmap[fold])
            )
        foldmap[fold] = f

    # check case-folding of directories
    # Walking the folded names in sorted order guarantees a file precedes
    # anything inside a same-named directory, so a prefix match with a
    # casing mismatch indicates a file-vs-directory collision.
    foldprefix = unfoldprefix = lastfull = b''
    for fold, f in sorted(foldmap.items()):
        if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
            # the folded prefix matches but actual casing is different
            raise error.Abort(
                _(b"case-folding collision between %s and directory of %s")
                % (lastfull, f)
            )
        foldprefix = fold + b'/'
        unfoldprefix = f + b'/'
        lastfull = f
1058
1058
1059
1059
def driverpreprocess(repo, ms, wctx, labels=None):
    """Run the preprocess step of the merge driver, if any.

    Merge drivers are an extension point; core Mercurial ships no
    implementation, so this stub always reports success (True).
    """
    return True
1065
1065
1066
1066
def driverconclude(repo, ms, wctx, labels=None):
    """Run the conclude step of the merge driver, if any.

    Merge drivers are an extension point; core Mercurial ships no
    implementation, so this stub always reports success (True).
    """
    return True
1072
1072
1073
1073
def _filesindirs(repo, manifest, dirs):
    """
    Generator that yields pairs of all the files in the manifest that are found
    inside the directories listed in dirs, and which directory they are found
    in.

    Each file is reported at most once, paired with the first of its parent
    directories (as produced by pathutil.finddirs) that appears in dirs.
    """
    for filename in manifest:
        # First parent directory of this file that is in the watched set,
        # or None when the file lives outside all of them.
        containing = next(
            (d for d in pathutil.finddirs(filename) if d in dirs), None
        )
        if containing is not None:
            yield filename, containing
1085
1085
1086
1086
def checkpathconflicts(repo, wctx, mctx, actions):
    """
    Check if any actions introduce path conflicts in the repository, updating
    actions to record or handle the path conflict accordingly.

    repo    - the local repository
    wctx    - the working (local) context
    mctx    - the context being merged in (remote side)
    actions - dict of file -> (ACTION_* type, args, msg); mutated in place
              to insert rename/path-conflict actions

    Raises error.Abort when the remote manifest itself is inconsistent
    (a path is both a file and a directory).
    """
    mf = wctx.manifest()

    # The set of local files that conflict with a remote directory.
    localconflicts = set()

    # The set of directories that conflict with a remote file, and so may cause
    # conflicts if they still contain any files after the merge.
    remoteconflicts = set()

    # The set of directories that appear as both a file and a directory in the
    # remote manifest. These indicate an invalid remote manifest, which
    # can't be updated to cleanly.
    invalidconflicts = set()

    # The set of directories that contain files that are being created.
    createdfiledirs = set()

    # The set of files deleted by all the actions.
    deletedfiles = set()

    for f, (m, args, msg) in actions.items():
        if m in (
            ACTION_CREATED,
            ACTION_DELETED_CHANGED,
            ACTION_MERGE,
            ACTION_CREATED_MERGE,
        ):
            # This action may create a new local file.
            createdfiledirs.update(pathutil.finddirs(f))
            if mf.hasdir(f):
                # The file aliases a local directory. This might be ok if all
                # the files in the local directory are being deleted. This
                # will be checked once we know what all the deleted files are.
                remoteconflicts.add(f)
        # Track the names of all deleted files.
        if m == ACTION_REMOVE:
            deletedfiles.add(f)
        if m == ACTION_MERGE:
            # A merge with move=True deletes the merge source.
            f1, f2, fa, move, anc = args
            if move:
                deletedfiles.add(f1)
        if m == ACTION_DIR_RENAME_MOVE_LOCAL:
            # A directory-rename move deletes the moved-from file.
            f2, flags = args
            deletedfiles.add(f2)

    # Check all directories that contain created files for path conflicts.
    for p in createdfiledirs:
        if p in mf:
            if p in mctx:
                # A file is in a directory which aliases both a local
                # and a remote file. This is an internal inconsistency
                # within the remote manifest.
                invalidconflicts.add(p)
            else:
                # A file is in a directory which aliases a local file.
                # We will need to rename the local file.
                localconflicts.add(p)
        if p in actions and actions[p][0] in (
            ACTION_CREATED,
            ACTION_DELETED_CHANGED,
            ACTION_MERGE,
            ACTION_CREATED_MERGE,
        ):
            # The file is in a directory which aliases a remote file.
            # This is an internal inconsistency within the remote
            # manifest.
            invalidconflicts.add(p)

    # Rename all local conflicting files that have not been deleted.
    for p in localconflicts:
        if p not in deletedfiles:
            ctxname = bytes(wctx).rstrip(b'+')
            pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
            actions[pnew] = (
                ACTION_PATH_CONFLICT_RESOLVE,
                (p,),
                b'local path conflict',
            )
            actions[p] = (ACTION_PATH_CONFLICT, (pnew, b'l'), b'path conflict')

    if remoteconflicts:
        # Check if all files in the conflicting directories have been removed.
        ctxname = bytes(mctx).rstrip(b'+')
        for f, p in _filesindirs(repo, mf, remoteconflicts):
            if f not in deletedfiles:
                m, args, msg = actions[p]
                pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
                if m in (ACTION_DELETED_CHANGED, ACTION_MERGE):
                    # Action was merge, just update target.
                    actions[pnew] = (m, args, msg)
                else:
                    # Action was create, change to renamed get action.
                    fl = args[0]
                    actions[pnew] = (
                        ACTION_LOCAL_DIR_RENAME_GET,
                        (p, fl),
                        b'remote path conflict',
                    )
                actions[p] = (
                    ACTION_PATH_CONFLICT,
                    (pnew, ACTION_REMOVE),
                    b'path conflict',
                )
                remoteconflicts.remove(p)
                # NOTE(review): only the first surviving file triggers a
                # rename for its directory before the loop stops -- presumably
                # one rename per conflicting directory suffices; confirm
                # against callers before relying on this.
                break

    if invalidconflicts:
        for p in invalidconflicts:
            repo.ui.warn(_(b"%s: is both a file and a directory\n") % p)
        raise error.Abort(_(b"destination manifest contains path conflicts"))
1202
1202
1203
1203
def _filternarrowactions(narrowmatch, branchmerge, actions):
    """
    Filters out actions that can ignored because the repo is narrowed.

    Raise an exception if the merge cannot be completed because the repo is
    narrowed.
    """
    nooptypes = {b'k'}  # TODO: handle with nonconflicttypes
    nonconflicttypes = set(b'a am c cm f g r e'.split())
    # Snapshot the items: entries are deleted while we walk them.
    for f, action in list(actions.items()):
        if narrowmatch(f):
            # Inside the narrowspec -- nothing to filter.
            continue
        if not branchmerge:
            # just updating, ignore changes outside clone
            del actions[f]
        elif action[0] in nooptypes:
            # merge does not affect file
            del actions[f]
        elif action[0] in nonconflicttypes:
            raise error.Abort(
                _(
                    b'merge affects file \'%s\' outside narrow, '
                    b'which is not yet supported'
                )
                % f,
                hint=_(b'merging in the other direction may work'),
            )
        else:
            raise error.Abort(
                _(b'conflict in file \'%s\' is outside narrow clone') % f
            )
1235
1235
1236
1236
1237 def manifestmerge(
1237 def manifestmerge(
1238 repo,
1238 repo,
1239 wctx,
1239 wctx,
1240 p2,
1240 p2,
1241 pa,
1241 pa,
1242 branchmerge,
1242 branchmerge,
1243 force,
1243 force,
1244 matcher,
1244 matcher,
1245 acceptremote,
1245 acceptremote,
1246 followcopies,
1246 followcopies,
1247 forcefulldiff=False,
1247 forcefulldiff=False,
1248 ):
1248 ):
1249 """
1249 """
1250 Merge wctx and p2 with ancestor pa and generate merge action list
1250 Merge wctx and p2 with ancestor pa and generate merge action list
1251
1251
1252 branchmerge and force are as passed in to update
1252 branchmerge and force are as passed in to update
1253 matcher = matcher to filter file lists
1253 matcher = matcher to filter file lists
1254 acceptremote = accept the incoming changes without prompting
1254 acceptremote = accept the incoming changes without prompting
1255 """
1255 """
1256 if matcher is not None and matcher.always():
1256 if matcher is not None and matcher.always():
1257 matcher = None
1257 matcher = None
1258
1258
1259 # manifests fetched in order are going to be faster, so prime the caches
1259 # manifests fetched in order are going to be faster, so prime the caches
1260 [
1260 [
1261 x.manifest()
1261 x.manifest()
1262 for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
1262 for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
1263 ]
1263 ]
1264
1264
1265 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
1265 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
1266 if followcopies:
1266 if followcopies:
1267 ret = copies.mergecopies(repo, wctx, p2, pa)
1267 branch_copies, diverge = copies.mergecopies(repo, wctx, p2, pa)
1268 copy, movewithdir, diverge, renamedelete, dirmove = ret
1268 copy = branch_copies.copy
1269 renamedelete = branch_copies.renamedelete
1270 dirmove = branch_copies.dirmove
1271 movewithdir = branch_copies.movewithdir
1269
1272
1270 boolbm = pycompat.bytestr(bool(branchmerge))
1273 boolbm = pycompat.bytestr(bool(branchmerge))
1271 boolf = pycompat.bytestr(bool(force))
1274 boolf = pycompat.bytestr(bool(force))
1272 boolm = pycompat.bytestr(bool(matcher))
1275 boolm = pycompat.bytestr(bool(matcher))
1273 repo.ui.note(_(b"resolving manifests\n"))
1276 repo.ui.note(_(b"resolving manifests\n"))
1274 repo.ui.debug(
1277 repo.ui.debug(
1275 b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm)
1278 b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm)
1276 )
1279 )
1277 repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
1280 repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
1278
1281
1279 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
1282 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
1280 copied = set(copy.values())
1283 copied = set(copy.values())
1281 copied.update(movewithdir.values())
1284 copied.update(movewithdir.values())
1282
1285
1283 if b'.hgsubstate' in m1 and wctx.rev() is None:
1286 if b'.hgsubstate' in m1 and wctx.rev() is None:
1284 # Check whether sub state is modified, and overwrite the manifest
1287 # Check whether sub state is modified, and overwrite the manifest
1285 # to flag the change. If wctx is a committed revision, we shouldn't
1288 # to flag the change. If wctx is a committed revision, we shouldn't
1286 # care for the dirty state of the working directory.
1289 # care for the dirty state of the working directory.
1287 if any(wctx.sub(s).dirty() for s in wctx.substate):
1290 if any(wctx.sub(s).dirty() for s in wctx.substate):
1288 m1[b'.hgsubstate'] = modifiednodeid
1291 m1[b'.hgsubstate'] = modifiednodeid
1289
1292
1290 # Don't use m2-vs-ma optimization if:
1293 # Don't use m2-vs-ma optimization if:
1291 # - ma is the same as m1 or m2, which we're just going to diff again later
1294 # - ma is the same as m1 or m2, which we're just going to diff again later
1292 # - The caller specifically asks for a full diff, which is useful during bid
1295 # - The caller specifically asks for a full diff, which is useful during bid
1293 # merge.
1296 # merge.
1294 if pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff:
1297 if pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff:
1295 # Identify which files are relevant to the merge, so we can limit the
1298 # Identify which files are relevant to the merge, so we can limit the
1296 # total m1-vs-m2 diff to just those files. This has significant
1299 # total m1-vs-m2 diff to just those files. This has significant
1297 # performance benefits in large repositories.
1300 # performance benefits in large repositories.
1298 relevantfiles = set(ma.diff(m2).keys())
1301 relevantfiles = set(ma.diff(m2).keys())
1299
1302
1300 # For copied and moved files, we need to add the source file too.
1303 # For copied and moved files, we need to add the source file too.
1301 for copykey, copyvalue in pycompat.iteritems(copy):
1304 for copykey, copyvalue in pycompat.iteritems(copy):
1302 if copyvalue in relevantfiles:
1305 if copyvalue in relevantfiles:
1303 relevantfiles.add(copykey)
1306 relevantfiles.add(copykey)
1304 for movedirkey in movewithdir:
1307 for movedirkey in movewithdir:
1305 relevantfiles.add(movedirkey)
1308 relevantfiles.add(movedirkey)
1306 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
1309 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
1307 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
1310 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
1308
1311
1309 diff = m1.diff(m2, match=matcher)
1312 diff = m1.diff(m2, match=matcher)
1310
1313
1311 actions = {}
1314 actions = {}
1312 for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
1315 for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
1313 if n1 and n2: # file exists on both local and remote side
1316 if n1 and n2: # file exists on both local and remote side
1314 if f not in ma:
1317 if f not in ma:
1315 fa = copy.get(f, None)
1318 fa = copy.get(f, None)
1316 if fa is not None:
1319 if fa is not None:
1317 actions[f] = (
1320 actions[f] = (
1318 ACTION_MERGE,
1321 ACTION_MERGE,
1319 (f, f, fa, False, pa.node()),
1322 (f, f, fa, False, pa.node()),
1320 b'both renamed from %s' % fa,
1323 b'both renamed from %s' % fa,
1321 )
1324 )
1322 else:
1325 else:
1323 actions[f] = (
1326 actions[f] = (
1324 ACTION_MERGE,
1327 ACTION_MERGE,
1325 (f, f, None, False, pa.node()),
1328 (f, f, None, False, pa.node()),
1326 b'both created',
1329 b'both created',
1327 )
1330 )
1328 else:
1331 else:
1329 a = ma[f]
1332 a = ma[f]
1330 fla = ma.flags(f)
1333 fla = ma.flags(f)
1331 nol = b'l' not in fl1 + fl2 + fla
1334 nol = b'l' not in fl1 + fl2 + fla
1332 if n2 == a and fl2 == fla:
1335 if n2 == a and fl2 == fla:
1333 actions[f] = (ACTION_KEEP, (), b'remote unchanged')
1336 actions[f] = (ACTION_KEEP, (), b'remote unchanged')
1334 elif n1 == a and fl1 == fla: # local unchanged - use remote
1337 elif n1 == a and fl1 == fla: # local unchanged - use remote
1335 if n1 == n2: # optimization: keep local content
1338 if n1 == n2: # optimization: keep local content
1336 actions[f] = (
1339 actions[f] = (
1337 ACTION_EXEC,
1340 ACTION_EXEC,
1338 (fl2,),
1341 (fl2,),
1339 b'update permissions',
1342 b'update permissions',
1340 )
1343 )
1341 else:
1344 else:
1342 actions[f] = (
1345 actions[f] = (
1343 ACTION_GET,
1346 ACTION_GET,
1344 (fl2, False),
1347 (fl2, False),
1345 b'remote is newer',
1348 b'remote is newer',
1346 )
1349 )
1347 elif nol and n2 == a: # remote only changed 'x'
1350 elif nol and n2 == a: # remote only changed 'x'
1348 actions[f] = (ACTION_EXEC, (fl2,), b'update permissions')
1351 actions[f] = (ACTION_EXEC, (fl2,), b'update permissions')
1349 elif nol and n1 == a: # local only changed 'x'
1352 elif nol and n1 == a: # local only changed 'x'
1350 actions[f] = (ACTION_GET, (fl1, False), b'remote is newer')
1353 actions[f] = (ACTION_GET, (fl1, False), b'remote is newer')
1351 else: # both changed something
1354 else: # both changed something
1352 actions[f] = (
1355 actions[f] = (
1353 ACTION_MERGE,
1356 ACTION_MERGE,
1354 (f, f, f, False, pa.node()),
1357 (f, f, f, False, pa.node()),
1355 b'versions differ',
1358 b'versions differ',
1356 )
1359 )
1357 elif n1: # file exists only on local side
1360 elif n1: # file exists only on local side
1358 if f in copied:
1361 if f in copied:
1359 pass # we'll deal with it on m2 side
1362 pass # we'll deal with it on m2 side
1360 elif f in movewithdir: # directory rename, move local
1363 elif f in movewithdir: # directory rename, move local
1361 f2 = movewithdir[f]
1364 f2 = movewithdir[f]
1362 if f2 in m2:
1365 if f2 in m2:
1363 actions[f2] = (
1366 actions[f2] = (
1364 ACTION_MERGE,
1367 ACTION_MERGE,
1365 (f, f2, None, True, pa.node()),
1368 (f, f2, None, True, pa.node()),
1366 b'remote directory rename, both created',
1369 b'remote directory rename, both created',
1367 )
1370 )
1368 else:
1371 else:
1369 actions[f2] = (
1372 actions[f2] = (
1370 ACTION_DIR_RENAME_MOVE_LOCAL,
1373 ACTION_DIR_RENAME_MOVE_LOCAL,
1371 (f, fl1),
1374 (f, fl1),
1372 b'remote directory rename - move from %s' % f,
1375 b'remote directory rename - move from %s' % f,
1373 )
1376 )
1374 elif f in copy:
1377 elif f in copy:
1375 f2 = copy[f]
1378 f2 = copy[f]
1376 actions[f] = (
1379 actions[f] = (
1377 ACTION_MERGE,
1380 ACTION_MERGE,
1378 (f, f2, f2, False, pa.node()),
1381 (f, f2, f2, False, pa.node()),
1379 b'local copied/moved from %s' % f2,
1382 b'local copied/moved from %s' % f2,
1380 )
1383 )
1381 elif f in ma: # clean, a different, no remote
1384 elif f in ma: # clean, a different, no remote
1382 if n1 != ma[f]:
1385 if n1 != ma[f]:
1383 if acceptremote:
1386 if acceptremote:
1384 actions[f] = (ACTION_REMOVE, None, b'remote delete')
1387 actions[f] = (ACTION_REMOVE, None, b'remote delete')
1385 else:
1388 else:
1386 actions[f] = (
1389 actions[f] = (
1387 ACTION_CHANGED_DELETED,
1390 ACTION_CHANGED_DELETED,
1388 (f, None, f, False, pa.node()),
1391 (f, None, f, False, pa.node()),
1389 b'prompt changed/deleted',
1392 b'prompt changed/deleted',
1390 )
1393 )
1391 elif n1 == addednodeid:
1394 elif n1 == addednodeid:
1392 # This extra 'a' is added by working copy manifest to mark
1395 # This extra 'a' is added by working copy manifest to mark
1393 # the file as locally added. We should forget it instead of
1396 # the file as locally added. We should forget it instead of
1394 # deleting it.
1397 # deleting it.
1395 actions[f] = (ACTION_FORGET, None, b'remote deleted')
1398 actions[f] = (ACTION_FORGET, None, b'remote deleted')
1396 else:
1399 else:
1397 actions[f] = (ACTION_REMOVE, None, b'other deleted')
1400 actions[f] = (ACTION_REMOVE, None, b'other deleted')
1398 elif n2: # file exists only on remote side
1401 elif n2: # file exists only on remote side
1399 if f in copied:
1402 if f in copied:
1400 pass # we'll deal with it on m1 side
1403 pass # we'll deal with it on m1 side
1401 elif f in movewithdir:
1404 elif f in movewithdir:
1402 f2 = movewithdir[f]
1405 f2 = movewithdir[f]
1403 if f2 in m1:
1406 if f2 in m1:
1404 actions[f2] = (
1407 actions[f2] = (
1405 ACTION_MERGE,
1408 ACTION_MERGE,
1406 (f2, f, None, False, pa.node()),
1409 (f2, f, None, False, pa.node()),
1407 b'local directory rename, both created',
1410 b'local directory rename, both created',
1408 )
1411 )
1409 else:
1412 else:
1410 actions[f2] = (
1413 actions[f2] = (
1411 ACTION_LOCAL_DIR_RENAME_GET,
1414 ACTION_LOCAL_DIR_RENAME_GET,
1412 (f, fl2),
1415 (f, fl2),
1413 b'local directory rename - get from %s' % f,
1416 b'local directory rename - get from %s' % f,
1414 )
1417 )
1415 elif f in copy:
1418 elif f in copy:
1416 f2 = copy[f]
1419 f2 = copy[f]
1417 if f2 in m2:
1420 if f2 in m2:
1418 actions[f] = (
1421 actions[f] = (
1419 ACTION_MERGE,
1422 ACTION_MERGE,
1420 (f2, f, f2, False, pa.node()),
1423 (f2, f, f2, False, pa.node()),
1421 b'remote copied from %s' % f2,
1424 b'remote copied from %s' % f2,
1422 )
1425 )
1423 else:
1426 else:
1424 actions[f] = (
1427 actions[f] = (
1425 ACTION_MERGE,
1428 ACTION_MERGE,
1426 (f2, f, f2, True, pa.node()),
1429 (f2, f, f2, True, pa.node()),
1427 b'remote moved from %s' % f2,
1430 b'remote moved from %s' % f2,
1428 )
1431 )
1429 elif f not in ma:
1432 elif f not in ma:
1430 # local unknown, remote created: the logic is described by the
1433 # local unknown, remote created: the logic is described by the
1431 # following table:
1434 # following table:
1432 #
1435 #
1433 # force branchmerge different | action
1436 # force branchmerge different | action
1434 # n * * | create
1437 # n * * | create
1435 # y n * | create
1438 # y n * | create
1436 # y y n | create
1439 # y y n | create
1437 # y y y | merge
1440 # y y y | merge
1438 #
1441 #
1439 # Checking whether the files are different is expensive, so we
1442 # Checking whether the files are different is expensive, so we
1440 # don't do that when we can avoid it.
1443 # don't do that when we can avoid it.
1441 if not force:
1444 if not force:
1442 actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
1445 actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
1443 elif not branchmerge:
1446 elif not branchmerge:
1444 actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
1447 actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
1445 else:
1448 else:
1446 actions[f] = (
1449 actions[f] = (
1447 ACTION_CREATED_MERGE,
1450 ACTION_CREATED_MERGE,
1448 (fl2, pa.node()),
1451 (fl2, pa.node()),
1449 b'remote created, get or merge',
1452 b'remote created, get or merge',
1450 )
1453 )
1451 elif n2 != ma[f]:
1454 elif n2 != ma[f]:
1452 df = None
1455 df = None
1453 for d in dirmove:
1456 for d in dirmove:
1454 if f.startswith(d):
1457 if f.startswith(d):
1455 # new file added in a directory that was moved
1458 # new file added in a directory that was moved
1456 df = dirmove[d] + f[len(d) :]
1459 df = dirmove[d] + f[len(d) :]
1457 break
1460 break
1458 if df is not None and df in m1:
1461 if df is not None and df in m1:
1459 actions[df] = (
1462 actions[df] = (
1460 ACTION_MERGE,
1463 ACTION_MERGE,
1461 (df, f, f, False, pa.node()),
1464 (df, f, f, False, pa.node()),
1462 b'local directory rename - respect move '
1465 b'local directory rename - respect move '
1463 b'from %s' % f,
1466 b'from %s' % f,
1464 )
1467 )
1465 elif acceptremote:
1468 elif acceptremote:
1466 actions[f] = (ACTION_CREATED, (fl2,), b'remote recreating')
1469 actions[f] = (ACTION_CREATED, (fl2,), b'remote recreating')
1467 else:
1470 else:
1468 actions[f] = (
1471 actions[f] = (
1469 ACTION_DELETED_CHANGED,
1472 ACTION_DELETED_CHANGED,
1470 (None, f, f, False, pa.node()),
1473 (None, f, f, False, pa.node()),
1471 b'prompt deleted/changed',
1474 b'prompt deleted/changed',
1472 )
1475 )
1473
1476
1474 if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
1477 if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
1475 # If we are merging, look for path conflicts.
1478 # If we are merging, look for path conflicts.
1476 checkpathconflicts(repo, wctx, p2, actions)
1479 checkpathconflicts(repo, wctx, p2, actions)
1477
1480
1478 narrowmatch = repo.narrowmatch()
1481 narrowmatch = repo.narrowmatch()
1479 if not narrowmatch.always():
1482 if not narrowmatch.always():
1480 # Updates "actions" in place
1483 # Updates "actions" in place
1481 _filternarrowactions(narrowmatch, branchmerge, actions)
1484 _filternarrowactions(narrowmatch, branchmerge, actions)
1482
1485
1483 return actions, diverge, renamedelete
1486 return actions, diverge, renamedelete
1484
1487
1485
1488
def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
    """Drop false conflicts where the nodeid changed but the content did not.

    Scans ``actions`` for changed/deleted and deleted/changed conflicts and,
    when the surviving side's content is identical to the ancestor's, rewrites
    or removes the action so no prompt is needed.  Mutates ``actions`` in
    place.
    """
    # Snapshot the items up front because we mutate ``actions`` while
    # iterating over it.
    for f, (act, args, msg) in list(actions.items()):
        if f not in ancestor:
            continue
        if act == ACTION_CHANGED_DELETED and not wctx[f].cmp(ancestor[f]):
            # local did change but ended up with same content as the
            # ancestor, so the remote deletion can simply win
            actions[f] = (ACTION_REMOVE, None, b'prompt same')
        elif act == ACTION_DELETED_CHANGED and not mctx[f].cmp(ancestor[f]):
            # remote did change but ended up with same content; dropping
            # the action keeps the local deletion (don't get)
            del actions[f]
1506
1509
1507
1510
def calculateupdates(
    repo,
    wctx,
    mctx,
    ancestors,
    branchmerge,
    force,
    acceptremote,
    followcopies,
    matcher=None,
    mergeforce=False,
):
    """Calculate the actions needed to merge mctx into wctx using ancestors"""
    # Avoid cycle.
    from . import sparse

    if len(ancestors) == 1:  # default
        actions, diverge, renamedelete = manifestmerge(
            repo,
            wctx,
            mctx,
            ancestors[0],
            branchmerge,
            force,
            matcher,
            acceptremote,
            followcopies,
        )
        _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

    else:  # only when merge.preferancestor=* - the default
        repo.ui.note(
            _(b"note: merging %s and %s using bids from ancestors %s\n")
            % (
                wctx,
                mctx,
                _(b' and ').join(pycompat.bytestr(anc) for anc in ancestors),
            )
        )

        # Phase 1: gather one bid per ancestor.  ``fbids`` maps each
        # filename to a dict of action method -> list of actions.
        fbids = {}
        diverge, renamedelete = None, None
        for ancestor in ancestors:
            repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor)
            actions, diverge1, renamedelete1 = manifestmerge(
                repo,
                wctx,
                mctx,
                ancestor,
                branchmerge,
                force,
                matcher,
                acceptremote,
                followcopies,
                forcefulldiff=True,
            )
            _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

            # Track the shortest set of warning on the theory that bid
            # merge will correctly incorporate more information
            if diverge is None or len(diverge1) < len(diverge):
                diverge = diverge1
            if renamedelete is None or len(renamedelete) < len(renamedelete1):
                renamedelete = renamedelete1

            for f, a in sorted(pycompat.iteritems(actions)):
                m, args, msg = a
                repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m))
                fbids.setdefault(f, {}).setdefault(m, []).append(a)

        # Phase 2: auction - pick the best bid for each file.
        repo.ui.note(_(b'\nauction for merging merge bids\n'))
        actions = {}
        for f, bids in sorted(fbids.items()):
            # bids is a mapping from action method to list af actions
            # Consensus?
            if len(bids) == 1:  # all bids are the same kind of method
                m, l = list(bids.items())[0]
                if all(a == l[0] for a in l[1:]):  # len(bids) is > 1
                    repo.ui.note(_(b" %s: consensus for %s\n") % (f, m))
                    actions[f] = l[0]
                    continue
            # If keep is an option, just do it.
            if ACTION_KEEP in bids:
                repo.ui.note(_(b" %s: picking 'keep' action\n") % f)
                actions[f] = bids[ACTION_KEEP][0]
                continue
            # If there are gets and they all agree [how could they not?], do it.
            if ACTION_GET in bids:
                ga0 = bids[ACTION_GET][0]
                if all(a == ga0 for a in bids[ACTION_GET][1:]):
                    repo.ui.note(_(b" %s: picking 'get' action\n") % f)
                    actions[f] = ga0
                    continue
            # TODO: Consider other simple actions such as mode changes
            # Handle inefficient democrazy.
            repo.ui.note(_(b' %s: multiple bids for merge action:\n') % f)
            for m, l in sorted(bids.items()):
                for _f, args, msg in l:
                    repo.ui.note(b' %s -> %s\n' % (msg, m))
            # Pick random action. TODO: Instead, prompt user when resolving
            m, l = list(bids.items())[0]
            repo.ui.warn(
                _(b' %s: ambiguous merge - picked %s action\n') % (f, m)
            )
            actions[f] = l[0]
        repo.ui.note(_(b'end of auction\n\n'))

    if wctx.rev() is None:
        fractions = _forgetremoved(wctx, mctx, branchmerge)
        actions.update(fractions)

    prunedactions = sparse.filterupdatesactions(
        repo, wctx, mctx, branchmerge, actions
    )
    _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)

    return prunedactions, diverge, renamedelete
1637
1640
1638
1641
def _getcwd():
    """Return the current working directory, or None if it no longer exists."""
    try:
        return encoding.getcwd()
    except OSError as err:
        # The directory we were sitting in was deleted out from under us;
        # report that as None rather than blowing up.  Any other OS error
        # is unexpected and propagates.
        if err.errno != errno.ENOENT:
            raise
        return None
1646
1649
1647
1650
def batchremove(repo, wctx, actions):
    """Apply remove actions to the working directory.

    ``actions`` is an iterable of ``(f, args, msg)`` tuples.  Yields
    ``(count, filename)`` tuples so the caller can drive a progress bar.
    """
    verbose = repo.ui.verbose
    cwd = _getcwd()
    pending = 0
    for f, args, msg in actions:
        repo.ui.debug(b" %s: %s -> r\n" % (f, msg))
        if verbose:
            repo.ui.note(_(b"removing %s\n") % f)
        wctx[f].audit()
        try:
            wctx[f].remove(ignoremissing=True)
        except OSError as err:
            repo.ui.warn(
                _(b"update failed to remove %s: %s!\n") % (f, err.strerror)
            )
        # Report progress in batches of roughly 100 files.
        if pending == 100:
            yield pending, f
            pending = 0
        pending += 1
    if pending > 0:
        yield pending, f

    if cwd and not _getcwd():
        # Our own working directory was among the files removed; print a
        # helpful warning so the user knows why their shell is confused.
        repo.ui.warn(
            _(
                b"current directory was removed\n"
                b"(consider changing to repo root: %s)\n"
            )
            % repo.root
        )
1684
1687
1685
1688
def batchget(repo, mctx, wctx, wantfiledata, actions):
    """apply gets to the working directory

    mctx is the context to get from

    Yields arbitrarily many (False, tuple) for progress updates, followed by
    exactly one (True, filedata). When wantfiledata is false, filedata is an
    empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size,
    mtime) of the file f written for each action.
    """
    filedata = {}
    verbose = repo.ui.verbose
    fctx = mctx.filectx
    ui = repo.ui
    i = 0
    # The atomic-file config knob cannot change while we run, so read it once
    # here instead of once per file inside the loop (loop-invariant hoist).
    atomictemp = ui.configbool(b"experimental", b"update.atomic-file")
    with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
        for f, (flags, backup), msg in actions:
            repo.ui.debug(b" %s: %s -> g\n" % (f, msg))
            if verbose:
                repo.ui.note(_(b"getting %s\n") % f)

            if backup:
                # If a file or directory exists with the same name, back that
                # up. Otherwise, look to see if there is a file that conflicts
                # with a directory this file is in, and if so, back that up.
                conflicting = f
                if not repo.wvfs.lexists(f):
                    for p in pathutil.finddirs(f):
                        if repo.wvfs.isfileorlink(p):
                            conflicting = p
                            break
                if repo.wvfs.lexists(conflicting):
                    orig = scmutil.backuppath(ui, repo, conflicting)
                    util.rename(repo.wjoin(conflicting), orig)
            wfctx = wctx[f]
            # Clear any stale "unknown file" cache entry before writing.
            wfctx.clearunknown()
            size = wfctx.write(
                fctx(f).data(),
                flags,
                backgroundclose=True,
                atomictemp=atomictemp,
            )
            if wantfiledata:
                s = wfctx.lstat()
                mode = s.st_mode
                mtime = s[stat.ST_MTIME]
                filedata[f] = (mode, size, mtime)  # for dirstate.normal
            # Report progress in batches of roughly 100 files.
            if i == 100:
                yield False, (i, f)
                i = 0
            i += 1
        if i > 0:
            yield False, (i, f)
    yield True, filedata
1741
1744
1742
1745
def _prefetchfiles(repo, ctx, actions):
    """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
    of merge actions. ``ctx`` is the context being merged in."""

    # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
    # don't touch the context to be merged in. 'cd' is skipped, because
    # changed/deleted never resolves to something from the remote side.
    relevant = (
        ACTION_GET,
        ACTION_DELETED_CHANGED,
        ACTION_LOCAL_DIR_RENAME_GET,
        ACTION_MERGE,
    )
    # Flatten the per-action lists into a single list of filenames.
    files = [f for a in relevant for f, args, msg in actions[a]]
    scmutil.prefetchfiles(
        repo, [ctx.rev()], scmutil.matchfiles(repo, files)
    )
1766
1769
1767
1770
@attr.s(frozen=True)
class updateresult(object):
    """Counts describing the outcome of an update/merge operation."""

    updatedcount = attr.ib()  # files freshly written from the other side
    mergedcount = attr.ib()  # files that went through a merge
    removedcount = attr.ib()  # files removed from the working copy
    unresolvedcount = attr.ib()  # files left in conflicted state

    def isempty(self):
        """Return True when the update touched nothing at all."""
        return not any(
            (
                self.updatedcount,
                self.mergedcount,
                self.removedcount,
                self.unresolvedcount,
            )
        )
1782
1785
1783
1786
def emptyactions():
    """create an actions dict, to be populated and passed to applyupdates()"""
    # One empty list per known action type; applyupdates() indexes all of
    # these keys unconditionally, so every one must be present.  A dict
    # comprehension replaces the old dict((m, []) for ...) construction.
    return {
        m: []
        for m in (
            ACTION_ADD,
            ACTION_ADD_MODIFIED,
            ACTION_FORGET,
            ACTION_GET,
            ACTION_CHANGED_DELETED,
            ACTION_DELETED_CHANGED,
            ACTION_REMOVE,
            ACTION_DIR_RENAME_MOVE_LOCAL,
            ACTION_LOCAL_DIR_RENAME_GET,
            ACTION_MERGE,
            ACTION_EXEC,
            ACTION_KEEP,
            ACTION_PATH_CONFLICT,
            ACTION_PATH_CONFLICT_RESOLVE,
        )
    }
1805
1808
1806
1809
1807 def applyupdates(
1810 def applyupdates(
1808 repo, actions, wctx, mctx, overwrite, wantfiledata, labels=None
1811 repo, actions, wctx, mctx, overwrite, wantfiledata, labels=None
1809 ):
1812 ):
1810 """apply the merge action list to the working directory
1813 """apply the merge action list to the working directory
1811
1814
1812 wctx is the working copy context
1815 wctx is the working copy context
1813 mctx is the context to be merged into the working copy
1816 mctx is the context to be merged into the working copy
1814
1817
1815 Return a tuple of (counts, filedata), where counts is a tuple
1818 Return a tuple of (counts, filedata), where counts is a tuple
1816 (updated, merged, removed, unresolved) that describes how many
1819 (updated, merged, removed, unresolved) that describes how many
1817 files were affected by the update, and filedata is as described in
1820 files were affected by the update, and filedata is as described in
1818 batchget.
1821 batchget.
1819 """
1822 """
1820
1823
1821 _prefetchfiles(repo, mctx, actions)
1824 _prefetchfiles(repo, mctx, actions)
1822
1825
1823 updated, merged, removed = 0, 0, 0
1826 updated, merged, removed = 0, 0, 0
1824 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1827 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1825 moves = []
1828 moves = []
1826 for m, l in actions.items():
1829 for m, l in actions.items():
1827 l.sort()
1830 l.sort()
1828
1831
1829 # 'cd' and 'dc' actions are treated like other merge conflicts
1832 # 'cd' and 'dc' actions are treated like other merge conflicts
1830 mergeactions = sorted(actions[ACTION_CHANGED_DELETED])
1833 mergeactions = sorted(actions[ACTION_CHANGED_DELETED])
1831 mergeactions.extend(sorted(actions[ACTION_DELETED_CHANGED]))
1834 mergeactions.extend(sorted(actions[ACTION_DELETED_CHANGED]))
1832 mergeactions.extend(actions[ACTION_MERGE])
1835 mergeactions.extend(actions[ACTION_MERGE])
1833 for f, args, msg in mergeactions:
1836 for f, args, msg in mergeactions:
1834 f1, f2, fa, move, anc = args
1837 f1, f2, fa, move, anc = args
1835 if f == b'.hgsubstate': # merged internally
1838 if f == b'.hgsubstate': # merged internally
1836 continue
1839 continue
1837 if f1 is None:
1840 if f1 is None:
1838 fcl = filemerge.absentfilectx(wctx, fa)
1841 fcl = filemerge.absentfilectx(wctx, fa)
1839 else:
1842 else:
1840 repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f))
1843 repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f))
1841 fcl = wctx[f1]
1844 fcl = wctx[f1]
1842 if f2 is None:
1845 if f2 is None:
1843 fco = filemerge.absentfilectx(mctx, fa)
1846 fco = filemerge.absentfilectx(mctx, fa)
1844 else:
1847 else:
1845 fco = mctx[f2]
1848 fco = mctx[f2]
1846 actx = repo[anc]
1849 actx = repo[anc]
1847 if fa in actx:
1850 if fa in actx:
1848 fca = actx[fa]
1851 fca = actx[fa]
1849 else:
1852 else:
1850 # TODO: move to absentfilectx
1853 # TODO: move to absentfilectx
1851 fca = repo.filectx(f1, fileid=nullrev)
1854 fca = repo.filectx(f1, fileid=nullrev)
1852 ms.add(fcl, fco, fca, f)
1855 ms.add(fcl, fco, fca, f)
1853 if f1 != f and move:
1856 if f1 != f and move:
1854 moves.append(f1)
1857 moves.append(f1)
1855
1858
1856 # remove renamed files after safely stored
1859 # remove renamed files after safely stored
1857 for f in moves:
1860 for f in moves:
1858 if wctx[f].lexists():
1861 if wctx[f].lexists():
1859 repo.ui.debug(b"removing %s\n" % f)
1862 repo.ui.debug(b"removing %s\n" % f)
1860 wctx[f].audit()
1863 wctx[f].audit()
1861 wctx[f].remove()
1864 wctx[f].remove()
1862
1865
1863 numupdates = sum(len(l) for m, l in actions.items() if m != ACTION_KEEP)
1866 numupdates = sum(len(l) for m, l in actions.items() if m != ACTION_KEEP)
1864 progress = repo.ui.makeprogress(
1867 progress = repo.ui.makeprogress(
1865 _(b'updating'), unit=_(b'files'), total=numupdates
1868 _(b'updating'), unit=_(b'files'), total=numupdates
1866 )
1869 )
1867
1870
1868 if [a for a in actions[ACTION_REMOVE] if a[0] == b'.hgsubstate']:
1871 if [a for a in actions[ACTION_REMOVE] if a[0] == b'.hgsubstate']:
1869 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1872 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1870
1873
1871 # record path conflicts
1874 # record path conflicts
1872 for f, args, msg in actions[ACTION_PATH_CONFLICT]:
1875 for f, args, msg in actions[ACTION_PATH_CONFLICT]:
1873 f1, fo = args
1876 f1, fo = args
1874 s = repo.ui.status
1877 s = repo.ui.status
1875 s(
1878 s(
1876 _(
1879 _(
1877 b"%s: path conflict - a file or link has the same name as a "
1880 b"%s: path conflict - a file or link has the same name as a "
1878 b"directory\n"
1881 b"directory\n"
1879 )
1882 )
1880 % f
1883 % f
1881 )
1884 )
1882 if fo == b'l':
1885 if fo == b'l':
1883 s(_(b"the local file has been renamed to %s\n") % f1)
1886 s(_(b"the local file has been renamed to %s\n") % f1)
1884 else:
1887 else:
1885 s(_(b"the remote file has been renamed to %s\n") % f1)
1888 s(_(b"the remote file has been renamed to %s\n") % f1)
1886 s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
1889 s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
1887 ms.addpath(f, f1, fo)
1890 ms.addpath(f, f1, fo)
1888 progress.increment(item=f)
1891 progress.increment(item=f)
1889
1892
1890 # When merging in-memory, we can't support worker processes, so set the
1893 # When merging in-memory, we can't support worker processes, so set the
1891 # per-item cost at 0 in that case.
1894 # per-item cost at 0 in that case.
1892 cost = 0 if wctx.isinmemory() else 0.001
1895 cost = 0 if wctx.isinmemory() else 0.001
1893
1896
1894 # remove in parallel (must come before resolving path conflicts and getting)
1897 # remove in parallel (must come before resolving path conflicts and getting)
1895 prog = worker.worker(
1898 prog = worker.worker(
1896 repo.ui, cost, batchremove, (repo, wctx), actions[ACTION_REMOVE]
1899 repo.ui, cost, batchremove, (repo, wctx), actions[ACTION_REMOVE]
1897 )
1900 )
1898 for i, item in prog:
1901 for i, item in prog:
1899 progress.increment(step=i, item=item)
1902 progress.increment(step=i, item=item)
1900 removed = len(actions[ACTION_REMOVE])
1903 removed = len(actions[ACTION_REMOVE])
1901
1904
1902 # resolve path conflicts (must come before getting)
1905 # resolve path conflicts (must come before getting)
1903 for f, args, msg in actions[ACTION_PATH_CONFLICT_RESOLVE]:
1906 for f, args, msg in actions[ACTION_PATH_CONFLICT_RESOLVE]:
1904 repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
1907 repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
1905 (f0,) = args
1908 (f0,) = args
1906 if wctx[f0].lexists():
1909 if wctx[f0].lexists():
1907 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1910 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1908 wctx[f].audit()
1911 wctx[f].audit()
1909 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1912 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1910 wctx[f0].remove()
1913 wctx[f0].remove()
1911 progress.increment(item=f)
1914 progress.increment(item=f)
1912
1915
1913 # get in parallel.
1916 # get in parallel.
1914 threadsafe = repo.ui.configbool(
1917 threadsafe = repo.ui.configbool(
1915 b'experimental', b'worker.wdir-get-thread-safe'
1918 b'experimental', b'worker.wdir-get-thread-safe'
1916 )
1919 )
1917 prog = worker.worker(
1920 prog = worker.worker(
1918 repo.ui,
1921 repo.ui,
1919 cost,
1922 cost,
1920 batchget,
1923 batchget,
1921 (repo, mctx, wctx, wantfiledata),
1924 (repo, mctx, wctx, wantfiledata),
1922 actions[ACTION_GET],
1925 actions[ACTION_GET],
1923 threadsafe=threadsafe,
1926 threadsafe=threadsafe,
1924 hasretval=True,
1927 hasretval=True,
1925 )
1928 )
1926 getfiledata = {}
1929 getfiledata = {}
1927 for final, res in prog:
1930 for final, res in prog:
1928 if final:
1931 if final:
1929 getfiledata = res
1932 getfiledata = res
1930 else:
1933 else:
1931 i, item = res
1934 i, item = res
1932 progress.increment(step=i, item=item)
1935 progress.increment(step=i, item=item)
1933 updated = len(actions[ACTION_GET])
1936 updated = len(actions[ACTION_GET])
1934
1937
1935 if [a for a in actions[ACTION_GET] if a[0] == b'.hgsubstate']:
1938 if [a for a in actions[ACTION_GET] if a[0] == b'.hgsubstate']:
1936 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1939 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1937
1940
1938 # forget (manifest only, just log it) (must come first)
1941 # forget (manifest only, just log it) (must come first)
1939 for f, args, msg in actions[ACTION_FORGET]:
1942 for f, args, msg in actions[ACTION_FORGET]:
1940 repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
1943 repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
1941 progress.increment(item=f)
1944 progress.increment(item=f)
1942
1945
1943 # re-add (manifest only, just log it)
1946 # re-add (manifest only, just log it)
1944 for f, args, msg in actions[ACTION_ADD]:
1947 for f, args, msg in actions[ACTION_ADD]:
1945 repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
1948 repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
1946 progress.increment(item=f)
1949 progress.increment(item=f)
1947
1950
1948 # re-add/mark as modified (manifest only, just log it)
1951 # re-add/mark as modified (manifest only, just log it)
1949 for f, args, msg in actions[ACTION_ADD_MODIFIED]:
1952 for f, args, msg in actions[ACTION_ADD_MODIFIED]:
1950 repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
1953 repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
1951 progress.increment(item=f)
1954 progress.increment(item=f)
1952
1955
1953 # keep (noop, just log it)
1956 # keep (noop, just log it)
1954 for f, args, msg in actions[ACTION_KEEP]:
1957 for f, args, msg in actions[ACTION_KEEP]:
1955 repo.ui.debug(b" %s: %s -> k\n" % (f, msg))
1958 repo.ui.debug(b" %s: %s -> k\n" % (f, msg))
1956 # no progress
1959 # no progress
1957
1960
1958 # directory rename, move local
1961 # directory rename, move local
1959 for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
1962 for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
1960 repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
1963 repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
1961 progress.increment(item=f)
1964 progress.increment(item=f)
1962 f0, flags = args
1965 f0, flags = args
1963 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1966 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1964 wctx[f].audit()
1967 wctx[f].audit()
1965 wctx[f].write(wctx.filectx(f0).data(), flags)
1968 wctx[f].write(wctx.filectx(f0).data(), flags)
1966 wctx[f0].remove()
1969 wctx[f0].remove()
1967 updated += 1
1970 updated += 1
1968
1971
1969 # local directory rename, get
1972 # local directory rename, get
1970 for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
1973 for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
1971 repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
1974 repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
1972 progress.increment(item=f)
1975 progress.increment(item=f)
1973 f0, flags = args
1976 f0, flags = args
1974 repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
1977 repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
1975 wctx[f].write(mctx.filectx(f0).data(), flags)
1978 wctx[f].write(mctx.filectx(f0).data(), flags)
1976 updated += 1
1979 updated += 1
1977
1980
1978 # exec
1981 # exec
1979 for f, args, msg in actions[ACTION_EXEC]:
1982 for f, args, msg in actions[ACTION_EXEC]:
1980 repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
1983 repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
1981 progress.increment(item=f)
1984 progress.increment(item=f)
1982 (flags,) = args
1985 (flags,) = args
1983 wctx[f].audit()
1986 wctx[f].audit()
1984 wctx[f].setflags(b'l' in flags, b'x' in flags)
1987 wctx[f].setflags(b'l' in flags, b'x' in flags)
1985 updated += 1
1988 updated += 1
1986
1989
1987 # the ordering is important here -- ms.mergedriver will raise if the merge
1990 # the ordering is important here -- ms.mergedriver will raise if the merge
1988 # driver has changed, and we want to be able to bypass it when overwrite is
1991 # driver has changed, and we want to be able to bypass it when overwrite is
1989 # True
1992 # True
1990 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1993 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1991
1994
1992 if usemergedriver:
1995 if usemergedriver:
1993 if wctx.isinmemory():
1996 if wctx.isinmemory():
1994 raise error.InMemoryMergeConflictsError(
1997 raise error.InMemoryMergeConflictsError(
1995 b"in-memory merge does not support mergedriver"
1998 b"in-memory merge does not support mergedriver"
1996 )
1999 )
1997 ms.commit()
2000 ms.commit()
1998 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
2001 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1999 # the driver might leave some files unresolved
2002 # the driver might leave some files unresolved
2000 unresolvedf = set(ms.unresolved())
2003 unresolvedf = set(ms.unresolved())
2001 if not proceed:
2004 if not proceed:
2002 # XXX setting unresolved to at least 1 is a hack to make sure we
2005 # XXX setting unresolved to at least 1 is a hack to make sure we
2003 # error out
2006 # error out
2004 return updateresult(
2007 return updateresult(
2005 updated, merged, removed, max(len(unresolvedf), 1)
2008 updated, merged, removed, max(len(unresolvedf), 1)
2006 )
2009 )
2007 newactions = []
2010 newactions = []
2008 for f, args, msg in mergeactions:
2011 for f, args, msg in mergeactions:
2009 if f in unresolvedf:
2012 if f in unresolvedf:
2010 newactions.append((f, args, msg))
2013 newactions.append((f, args, msg))
2011 mergeactions = newactions
2014 mergeactions = newactions
2012
2015
2013 try:
2016 try:
2014 # premerge
2017 # premerge
2015 tocomplete = []
2018 tocomplete = []
2016 for f, args, msg in mergeactions:
2019 for f, args, msg in mergeactions:
2017 repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg))
2020 repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg))
2018 progress.increment(item=f)
2021 progress.increment(item=f)
2019 if f == b'.hgsubstate': # subrepo states need updating
2022 if f == b'.hgsubstate': # subrepo states need updating
2020 subrepoutil.submerge(
2023 subrepoutil.submerge(
2021 repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels
2024 repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels
2022 )
2025 )
2023 continue
2026 continue
2024 wctx[f].audit()
2027 wctx[f].audit()
2025 complete, r = ms.preresolve(f, wctx)
2028 complete, r = ms.preresolve(f, wctx)
2026 if not complete:
2029 if not complete:
2027 numupdates += 1
2030 numupdates += 1
2028 tocomplete.append((f, args, msg))
2031 tocomplete.append((f, args, msg))
2029
2032
2030 # merge
2033 # merge
2031 for f, args, msg in tocomplete:
2034 for f, args, msg in tocomplete:
2032 repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg))
2035 repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg))
2033 progress.increment(item=f, total=numupdates)
2036 progress.increment(item=f, total=numupdates)
2034 ms.resolve(f, wctx)
2037 ms.resolve(f, wctx)
2035
2038
2036 finally:
2039 finally:
2037 ms.commit()
2040 ms.commit()
2038
2041
2039 unresolved = ms.unresolvedcount()
2042 unresolved = ms.unresolvedcount()
2040
2043
2041 if (
2044 if (
2042 usemergedriver
2045 usemergedriver
2043 and not unresolved
2046 and not unresolved
2044 and ms.mdstate() != MERGE_DRIVER_STATE_SUCCESS
2047 and ms.mdstate() != MERGE_DRIVER_STATE_SUCCESS
2045 ):
2048 ):
2046 if not driverconclude(repo, ms, wctx, labels=labels):
2049 if not driverconclude(repo, ms, wctx, labels=labels):
2047 # XXX setting unresolved to at least 1 is a hack to make sure we
2050 # XXX setting unresolved to at least 1 is a hack to make sure we
2048 # error out
2051 # error out
2049 unresolved = max(unresolved, 1)
2052 unresolved = max(unresolved, 1)
2050
2053
2051 ms.commit()
2054 ms.commit()
2052
2055
2053 msupdated, msmerged, msremoved = ms.counts()
2056 msupdated, msmerged, msremoved = ms.counts()
2054 updated += msupdated
2057 updated += msupdated
2055 merged += msmerged
2058 merged += msmerged
2056 removed += msremoved
2059 removed += msremoved
2057
2060
2058 extraactions = ms.actions()
2061 extraactions = ms.actions()
2059 if extraactions:
2062 if extraactions:
2060 mfiles = set(a[0] for a in actions[ACTION_MERGE])
2063 mfiles = set(a[0] for a in actions[ACTION_MERGE])
2061 for k, acts in pycompat.iteritems(extraactions):
2064 for k, acts in pycompat.iteritems(extraactions):
2062 actions[k].extend(acts)
2065 actions[k].extend(acts)
2063 if k == ACTION_GET and wantfiledata:
2066 if k == ACTION_GET and wantfiledata:
2064 # no filedata until mergestate is updated to provide it
2067 # no filedata until mergestate is updated to provide it
2065 for a in acts:
2068 for a in acts:
2066 getfiledata[a[0]] = None
2069 getfiledata[a[0]] = None
2067 # Remove these files from actions[ACTION_MERGE] as well. This is
2070 # Remove these files from actions[ACTION_MERGE] as well. This is
2068 # important because in recordupdates, files in actions[ACTION_MERGE]
2071 # important because in recordupdates, files in actions[ACTION_MERGE]
2069 # are processed after files in other actions, and the merge driver
2072 # are processed after files in other actions, and the merge driver
2070 # might add files to those actions via extraactions above. This can
2073 # might add files to those actions via extraactions above. This can
2071 # lead to a file being recorded twice, with poor results. This is
2074 # lead to a file being recorded twice, with poor results. This is
2072 # especially problematic for actions[ACTION_REMOVE] (currently only
2075 # especially problematic for actions[ACTION_REMOVE] (currently only
2073 # possible with the merge driver in the initial merge process;
2076 # possible with the merge driver in the initial merge process;
2074 # interrupted merges don't go through this flow).
2077 # interrupted merges don't go through this flow).
2075 #
2078 #
2076 # The real fix here is to have indexes by both file and action so
2079 # The real fix here is to have indexes by both file and action so
2077 # that when the action for a file is changed it is automatically
2080 # that when the action for a file is changed it is automatically
2078 # reflected in the other action lists. But that involves a more
2081 # reflected in the other action lists. But that involves a more
2079 # complex data structure, so this will do for now.
2082 # complex data structure, so this will do for now.
2080 #
2083 #
2081 # We don't need to do the same operation for 'dc' and 'cd' because
2084 # We don't need to do the same operation for 'dc' and 'cd' because
2082 # those lists aren't consulted again.
2085 # those lists aren't consulted again.
2083 mfiles.difference_update(a[0] for a in acts)
2086 mfiles.difference_update(a[0] for a in acts)
2084
2087
2085 actions[ACTION_MERGE] = [
2088 actions[ACTION_MERGE] = [
2086 a for a in actions[ACTION_MERGE] if a[0] in mfiles
2089 a for a in actions[ACTION_MERGE] if a[0] in mfiles
2087 ]
2090 ]
2088
2091
2089 progress.complete()
2092 progress.complete()
2090 assert len(getfiledata) == (len(actions[ACTION_GET]) if wantfiledata else 0)
2093 assert len(getfiledata) == (len(actions[ACTION_GET]) if wantfiledata else 0)
2091 return updateresult(updated, merged, removed, unresolved), getfiledata
2094 return updateresult(updated, merged, removed, unresolved), getfiledata
2092
2095
2093
2096
def recordupdates(repo, actions, branchmerge, getfiledata):
    """Record the effect of merge actions in the dirstate.

    ``actions`` maps action constants (ACTION_REMOVE, ACTION_GET, ...) to
    lists of ``(file, args, msg)`` tuples.  ``branchmerge`` selects between
    merge-style bookkeeping (two parents) and plain-update bookkeeping.
    ``getfiledata`` optionally supplies cached stat data for files written
    by ACTION_GET (only consulted in the non-branchmerge case).
    """
    ds = repo.dirstate

    # Removals are recorded before anything else so that re-adds of the
    # same name below see a consistent state.
    for fname, _args, _msg in actions.get(ACTION_REMOVE, []):
        if branchmerge:
            ds.remove(fname)
        else:
            ds.drop(fname)

    # Forgotten files are dropped outright (must also come first).
    for fname, _args, _msg in actions.get(ACTION_FORGET, []):
        ds.drop(fname)

    # Path conflicts that were resolved by renaming one side.
    for fname, fargs, _msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
        (f0,) = fargs
        origf0 = ds.copied(f0) or f0
        ds.add(fname)
        ds.copy(origf0, fname)
        if f0 == origf0:
            ds.remove(f0)
        else:
            ds.drop(f0)

    # Files re-added on one side.
    for fname, _args, _msg in actions.get(ACTION_ADD, []):
        ds.add(fname)

    # Files re-added with local modifications.
    for fname, _args, _msg in actions.get(ACTION_ADD_MODIFIED, []):
        if branchmerge:
            ds.normallookup(fname)
        else:
            ds.add(fname)

    # Flag (exec bit / symlink) changes only.
    for fname, _args, _msg in actions.get(ACTION_EXEC, []):
        ds.normallookup(fname)

    # Kept files need no dirstate change.
    for _record in actions.get(ACTION_KEEP, []):
        pass

    # Files taken from the other side.
    for fname, _args, _msg in actions.get(ACTION_GET, []):
        if branchmerge:
            ds.otherparent(fname)
        elif getfiledata:
            ds.normal(fname, parentfiledata=getfiledata[fname])
        else:
            ds.normal(fname, parentfiledata=None)

    # Three-way merged files.
    for fname, margs, _msg in actions.get(ACTION_MERGE, []):
        f1, f2, fa, move, anc = margs
        if branchmerge:
            # Mark this file as merged so the merger is recorded properly
            # later on.
            ds.merge(fname)
            if f1 != f2:  # copy/rename
                if move:
                    ds.remove(f1)
                if f1 != fname:
                    ds.copy(f1, fname)
                else:
                    ds.copy(f2, fname)
        else:
            # We've update-merged a locally modified file: set the dirstate
            # to emulate a normal checkout of that file some time in the
            # past, so the merge appears as a plain local modification.
            if f2 == fname:  # file not locally copied/moved
                ds.normallookup(fname)
            if move:
                ds.drop(f1)

    # Directory rename: a local file was moved into the renamed directory.
    for fname, dmargs, _msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
        f0, _flag = dmargs
        if branchmerge:
            ds.add(fname)
            ds.remove(f0)
            ds.copy(f0, fname)
        else:
            ds.normal(fname)
            ds.drop(f0)

    # Directory rename: a remote file was fetched into the renamed
    # directory.
    for fname, dgargs, _msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
        f0, _flag = dgargs
        if branchmerge:
            ds.add(fname)
            ds.copy(f0, fname)
        else:
            ds.normal(fname)
2189
2192
2190
2193
# Valid values for the ``updatecheck`` argument to update() (and the
# experimental.updatecheck config it mirrors).  They select what happens
# when updating with a dirty working directory; see the behavior table in
# update()'s docstring.  update() validates that it receives one of the
# last three values -- UPDATECHECK_ABORT is handled at higher layers.
UPDATECHECK_ABORT = b'abort'  # handled at higher layers
UPDATECHECK_NONE = b'none'  # no extra pre-update check
UPDATECHECK_LINEAR = b'linear'  # dirty nonlinear update aborts (see update())
UPDATECHECK_NO_CONFLICT = b'noconflict'  # abort if any action could conflict
2195
2198
2196
2199
2197 def update(
2200 def update(
2198 repo,
2201 repo,
2199 node,
2202 node,
2200 branchmerge,
2203 branchmerge,
2201 force,
2204 force,
2202 ancestor=None,
2205 ancestor=None,
2203 mergeancestor=False,
2206 mergeancestor=False,
2204 labels=None,
2207 labels=None,
2205 matcher=None,
2208 matcher=None,
2206 mergeforce=False,
2209 mergeforce=False,
2207 updatecheck=None,
2210 updatecheck=None,
2208 wc=None,
2211 wc=None,
2209 ):
2212 ):
2210 """
2213 """
2211 Perform a merge between the working directory and the given node
2214 Perform a merge between the working directory and the given node
2212
2215
2213 node = the node to update to
2216 node = the node to update to
2214 branchmerge = whether to merge between branches
2217 branchmerge = whether to merge between branches
2215 force = whether to force branch merging or file overwriting
2218 force = whether to force branch merging or file overwriting
2216 matcher = a matcher to filter file lists (dirstate not updated)
2219 matcher = a matcher to filter file lists (dirstate not updated)
2217 mergeancestor = whether it is merging with an ancestor. If true,
2220 mergeancestor = whether it is merging with an ancestor. If true,
2218 we should accept the incoming changes for any prompts that occur.
2221 we should accept the incoming changes for any prompts that occur.
2219 If false, merging with an ancestor (fast-forward) is only allowed
2222 If false, merging with an ancestor (fast-forward) is only allowed
2220 between different named branches. This flag is used by rebase extension
2223 between different named branches. This flag is used by rebase extension
2221 as a temporary fix and should be avoided in general.
2224 as a temporary fix and should be avoided in general.
2222 labels = labels to use for base, local and other
2225 labels = labels to use for base, local and other
2223 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
2226 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
2224 this is True, then 'force' should be True as well.
2227 this is True, then 'force' should be True as well.
2225
2228
2226 The table below shows all the behaviors of the update command given the
2229 The table below shows all the behaviors of the update command given the
2227 -c/--check and -C/--clean or no options, whether the working directory is
2230 -c/--check and -C/--clean or no options, whether the working directory is
2228 dirty, whether a revision is specified, and the relationship of the parent
2231 dirty, whether a revision is specified, and the relationship of the parent
2229 rev to the target rev (linear or not). Match from top first. The -n
2232 rev to the target rev (linear or not). Match from top first. The -n
2230 option doesn't exist on the command line, but represents the
2233 option doesn't exist on the command line, but represents the
2231 experimental.updatecheck=noconflict option.
2234 experimental.updatecheck=noconflict option.
2232
2235
2233 This logic is tested by test-update-branches.t.
2236 This logic is tested by test-update-branches.t.
2234
2237
2235 -c -C -n -m dirty rev linear | result
2238 -c -C -n -m dirty rev linear | result
2236 y y * * * * * | (1)
2239 y y * * * * * | (1)
2237 y * y * * * * | (1)
2240 y * y * * * * | (1)
2238 y * * y * * * | (1)
2241 y * * y * * * | (1)
2239 * y y * * * * | (1)
2242 * y y * * * * | (1)
2240 * y * y * * * | (1)
2243 * y * y * * * | (1)
2241 * * y y * * * | (1)
2244 * * y y * * * | (1)
2242 * * * * * n n | x
2245 * * * * * n n | x
2243 * * * * n * * | ok
2246 * * * * n * * | ok
2244 n n n n y * y | merge
2247 n n n n y * y | merge
2245 n n n n y y n | (2)
2248 n n n n y y n | (2)
2246 n n n y y * * | merge
2249 n n n y y * * | merge
2247 n n y n y * * | merge if no conflict
2250 n n y n y * * | merge if no conflict
2248 n y n n y * * | discard
2251 n y n n y * * | discard
2249 y n n n y * * | (3)
2252 y n n n y * * | (3)
2250
2253
2251 x = can't happen
2254 x = can't happen
2252 * = don't-care
2255 * = don't-care
2253 1 = incompatible options (checked in commands.py)
2256 1 = incompatible options (checked in commands.py)
2254 2 = abort: uncommitted changes (commit or update --clean to discard changes)
2257 2 = abort: uncommitted changes (commit or update --clean to discard changes)
2255 3 = abort: uncommitted changes (checked in commands.py)
2258 3 = abort: uncommitted changes (checked in commands.py)
2256
2259
2257 The merge is performed inside ``wc``, a workingctx-like objects. It defaults
2260 The merge is performed inside ``wc``, a workingctx-like objects. It defaults
2258 to repo[None] if None is passed.
2261 to repo[None] if None is passed.
2259
2262
2260 Return the same tuple as applyupdates().
2263 Return the same tuple as applyupdates().
2261 """
2264 """
2262 # Avoid cycle.
2265 # Avoid cycle.
2263 from . import sparse
2266 from . import sparse
2264
2267
2265 # This function used to find the default destination if node was None, but
2268 # This function used to find the default destination if node was None, but
2266 # that's now in destutil.py.
2269 # that's now in destutil.py.
2267 assert node is not None
2270 assert node is not None
2268 if not branchmerge and not force:
2271 if not branchmerge and not force:
2269 # TODO: remove the default once all callers that pass branchmerge=False
2272 # TODO: remove the default once all callers that pass branchmerge=False
2270 # and force=False pass a value for updatecheck. We may want to allow
2273 # and force=False pass a value for updatecheck. We may want to allow
2271 # updatecheck='abort' to better suppport some of these callers.
2274 # updatecheck='abort' to better suppport some of these callers.
2272 if updatecheck is None:
2275 if updatecheck is None:
2273 updatecheck = UPDATECHECK_LINEAR
2276 updatecheck = UPDATECHECK_LINEAR
2274 if updatecheck not in (
2277 if updatecheck not in (
2275 UPDATECHECK_NONE,
2278 UPDATECHECK_NONE,
2276 UPDATECHECK_LINEAR,
2279 UPDATECHECK_LINEAR,
2277 UPDATECHECK_NO_CONFLICT,
2280 UPDATECHECK_NO_CONFLICT,
2278 ):
2281 ):
2279 raise ValueError(
2282 raise ValueError(
2280 r'Invalid updatecheck %r (can accept %r)'
2283 r'Invalid updatecheck %r (can accept %r)'
2281 % (
2284 % (
2282 updatecheck,
2285 updatecheck,
2283 (
2286 (
2284 UPDATECHECK_NONE,
2287 UPDATECHECK_NONE,
2285 UPDATECHECK_LINEAR,
2288 UPDATECHECK_LINEAR,
2286 UPDATECHECK_NO_CONFLICT,
2289 UPDATECHECK_NO_CONFLICT,
2287 ),
2290 ),
2288 )
2291 )
2289 )
2292 )
2290 with repo.wlock():
2293 with repo.wlock():
2291 if wc is None:
2294 if wc is None:
2292 wc = repo[None]
2295 wc = repo[None]
2293 pl = wc.parents()
2296 pl = wc.parents()
2294 p1 = pl[0]
2297 p1 = pl[0]
2295 p2 = repo[node]
2298 p2 = repo[node]
2296 if ancestor is not None:
2299 if ancestor is not None:
2297 pas = [repo[ancestor]]
2300 pas = [repo[ancestor]]
2298 else:
2301 else:
2299 if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
2302 if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
2300 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
2303 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
2301 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
2304 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
2302 else:
2305 else:
2303 pas = [p1.ancestor(p2, warn=branchmerge)]
2306 pas = [p1.ancestor(p2, warn=branchmerge)]
2304
2307
2305 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
2308 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
2306
2309
2307 overwrite = force and not branchmerge
2310 overwrite = force and not branchmerge
2308 ### check phase
2311 ### check phase
2309 if not overwrite:
2312 if not overwrite:
2310 if len(pl) > 1:
2313 if len(pl) > 1:
2311 raise error.Abort(_(b"outstanding uncommitted merge"))
2314 raise error.Abort(_(b"outstanding uncommitted merge"))
2312 ms = mergestate.read(repo)
2315 ms = mergestate.read(repo)
2313 if list(ms.unresolved()):
2316 if list(ms.unresolved()):
2314 raise error.Abort(
2317 raise error.Abort(
2315 _(b"outstanding merge conflicts"),
2318 _(b"outstanding merge conflicts"),
2316 hint=_(b"use 'hg resolve' to resolve"),
2319 hint=_(b"use 'hg resolve' to resolve"),
2317 )
2320 )
2318 if branchmerge:
2321 if branchmerge:
2319 if pas == [p2]:
2322 if pas == [p2]:
2320 raise error.Abort(
2323 raise error.Abort(
2321 _(
2324 _(
2322 b"merging with a working directory ancestor"
2325 b"merging with a working directory ancestor"
2323 b" has no effect"
2326 b" has no effect"
2324 )
2327 )
2325 )
2328 )
2326 elif pas == [p1]:
2329 elif pas == [p1]:
2327 if not mergeancestor and wc.branch() == p2.branch():
2330 if not mergeancestor and wc.branch() == p2.branch():
2328 raise error.Abort(
2331 raise error.Abort(
2329 _(b"nothing to merge"),
2332 _(b"nothing to merge"),
2330 hint=_(b"use 'hg update' or check 'hg heads'"),
2333 hint=_(b"use 'hg update' or check 'hg heads'"),
2331 )
2334 )
2332 if not force and (wc.files() or wc.deleted()):
2335 if not force and (wc.files() or wc.deleted()):
2333 raise error.Abort(
2336 raise error.Abort(
2334 _(b"uncommitted changes"),
2337 _(b"uncommitted changes"),
2335 hint=_(b"use 'hg status' to list changes"),
2338 hint=_(b"use 'hg status' to list changes"),
2336 )
2339 )
2337 if not wc.isinmemory():
2340 if not wc.isinmemory():
2338 for s in sorted(wc.substate):
2341 for s in sorted(wc.substate):
2339 wc.sub(s).bailifchanged()
2342 wc.sub(s).bailifchanged()
2340
2343
2341 elif not overwrite:
2344 elif not overwrite:
2342 if p1 == p2: # no-op update
2345 if p1 == p2: # no-op update
2343 # call the hooks and exit early
2346 # call the hooks and exit early
2344 repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'')
2347 repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'')
2345 repo.hook(b'update', parent1=xp2, parent2=b'', error=0)
2348 repo.hook(b'update', parent1=xp2, parent2=b'', error=0)
2346 return updateresult(0, 0, 0, 0)
2349 return updateresult(0, 0, 0, 0)
2347
2350
2348 if updatecheck == UPDATECHECK_LINEAR and pas not in (
2351 if updatecheck == UPDATECHECK_LINEAR and pas not in (
2349 [p1],
2352 [p1],
2350 [p2],
2353 [p2],
2351 ): # nonlinear
2354 ): # nonlinear
2352 dirty = wc.dirty(missing=True)
2355 dirty = wc.dirty(missing=True)
2353 if dirty:
2356 if dirty:
2354 # Branching is a bit strange to ensure we do the minimal
2357 # Branching is a bit strange to ensure we do the minimal
2355 # amount of call to obsutil.foreground.
2358 # amount of call to obsutil.foreground.
2356 foreground = obsutil.foreground(repo, [p1.node()])
2359 foreground = obsutil.foreground(repo, [p1.node()])
2357 # note: the <node> variable contains a random identifier
2360 # note: the <node> variable contains a random identifier
2358 if repo[node].node() in foreground:
2361 if repo[node].node() in foreground:
2359 pass # allow updating to successors
2362 pass # allow updating to successors
2360 else:
2363 else:
2361 msg = _(b"uncommitted changes")
2364 msg = _(b"uncommitted changes")
2362 hint = _(b"commit or update --clean to discard changes")
2365 hint = _(b"commit or update --clean to discard changes")
2363 raise error.UpdateAbort(msg, hint=hint)
2366 raise error.UpdateAbort(msg, hint=hint)
2364 else:
2367 else:
2365 # Allow jumping branches if clean and specific rev given
2368 # Allow jumping branches if clean and specific rev given
2366 pass
2369 pass
2367
2370
2368 if overwrite:
2371 if overwrite:
2369 pas = [wc]
2372 pas = [wc]
2370 elif not branchmerge:
2373 elif not branchmerge:
2371 pas = [p1]
2374 pas = [p1]
2372
2375
2373 # deprecated config: merge.followcopies
2376 # deprecated config: merge.followcopies
2374 followcopies = repo.ui.configbool(b'merge', b'followcopies')
2377 followcopies = repo.ui.configbool(b'merge', b'followcopies')
2375 if overwrite:
2378 if overwrite:
2376 followcopies = False
2379 followcopies = False
2377 elif not pas[0]:
2380 elif not pas[0]:
2378 followcopies = False
2381 followcopies = False
2379 if not branchmerge and not wc.dirty(missing=True):
2382 if not branchmerge and not wc.dirty(missing=True):
2380 followcopies = False
2383 followcopies = False
2381
2384
2382 ### calculate phase
2385 ### calculate phase
2383 actionbyfile, diverge, renamedelete = calculateupdates(
2386 actionbyfile, diverge, renamedelete = calculateupdates(
2384 repo,
2387 repo,
2385 wc,
2388 wc,
2386 p2,
2389 p2,
2387 pas,
2390 pas,
2388 branchmerge,
2391 branchmerge,
2389 force,
2392 force,
2390 mergeancestor,
2393 mergeancestor,
2391 followcopies,
2394 followcopies,
2392 matcher=matcher,
2395 matcher=matcher,
2393 mergeforce=mergeforce,
2396 mergeforce=mergeforce,
2394 )
2397 )
2395
2398
2396 if updatecheck == UPDATECHECK_NO_CONFLICT:
2399 if updatecheck == UPDATECHECK_NO_CONFLICT:
2397 for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
2400 for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
2398 if m not in (
2401 if m not in (
2399 ACTION_GET,
2402 ACTION_GET,
2400 ACTION_KEEP,
2403 ACTION_KEEP,
2401 ACTION_EXEC,
2404 ACTION_EXEC,
2402 ACTION_REMOVE,
2405 ACTION_REMOVE,
2403 ACTION_PATH_CONFLICT_RESOLVE,
2406 ACTION_PATH_CONFLICT_RESOLVE,
2404 ):
2407 ):
2405 msg = _(b"conflicting changes")
2408 msg = _(b"conflicting changes")
2406 hint = _(b"commit or update --clean to discard changes")
2409 hint = _(b"commit or update --clean to discard changes")
2407 raise error.Abort(msg, hint=hint)
2410 raise error.Abort(msg, hint=hint)
2408
2411
2409 # Prompt and create actions. Most of this is in the resolve phase
2412 # Prompt and create actions. Most of this is in the resolve phase
2410 # already, but we can't handle .hgsubstate in filemerge or
2413 # already, but we can't handle .hgsubstate in filemerge or
2411 # subrepoutil.submerge yet so we have to keep prompting for it.
2414 # subrepoutil.submerge yet so we have to keep prompting for it.
2412 if b'.hgsubstate' in actionbyfile:
2415 if b'.hgsubstate' in actionbyfile:
2413 f = b'.hgsubstate'
2416 f = b'.hgsubstate'
2414 m, args, msg = actionbyfile[f]
2417 m, args, msg = actionbyfile[f]
2415 prompts = filemerge.partextras(labels)
2418 prompts = filemerge.partextras(labels)
2416 prompts[b'f'] = f
2419 prompts[b'f'] = f
2417 if m == ACTION_CHANGED_DELETED:
2420 if m == ACTION_CHANGED_DELETED:
2418 if repo.ui.promptchoice(
2421 if repo.ui.promptchoice(
2419 _(
2422 _(
2420 b"local%(l)s changed %(f)s which other%(o)s deleted\n"
2423 b"local%(l)s changed %(f)s which other%(o)s deleted\n"
2421 b"use (c)hanged version or (d)elete?"
2424 b"use (c)hanged version or (d)elete?"
2422 b"$$ &Changed $$ &Delete"
2425 b"$$ &Changed $$ &Delete"
2423 )
2426 )
2424 % prompts,
2427 % prompts,
2425 0,
2428 0,
2426 ):
2429 ):
2427 actionbyfile[f] = (ACTION_REMOVE, None, b'prompt delete')
2430 actionbyfile[f] = (ACTION_REMOVE, None, b'prompt delete')
2428 elif f in p1:
2431 elif f in p1:
2429 actionbyfile[f] = (
2432 actionbyfile[f] = (
2430 ACTION_ADD_MODIFIED,
2433 ACTION_ADD_MODIFIED,
2431 None,
2434 None,
2432 b'prompt keep',
2435 b'prompt keep',
2433 )
2436 )
2434 else:
2437 else:
2435 actionbyfile[f] = (ACTION_ADD, None, b'prompt keep')
2438 actionbyfile[f] = (ACTION_ADD, None, b'prompt keep')
2436 elif m == ACTION_DELETED_CHANGED:
2439 elif m == ACTION_DELETED_CHANGED:
2437 f1, f2, fa, move, anc = args
2440 f1, f2, fa, move, anc = args
2438 flags = p2[f2].flags()
2441 flags = p2[f2].flags()
2439 if (
2442 if (
2440 repo.ui.promptchoice(
2443 repo.ui.promptchoice(
2441 _(
2444 _(
2442 b"other%(o)s changed %(f)s which local%(l)s deleted\n"
2445 b"other%(o)s changed %(f)s which local%(l)s deleted\n"
2443 b"use (c)hanged version or leave (d)eleted?"
2446 b"use (c)hanged version or leave (d)eleted?"
2444 b"$$ &Changed $$ &Deleted"
2447 b"$$ &Changed $$ &Deleted"
2445 )
2448 )
2446 % prompts,
2449 % prompts,
2447 0,
2450 0,
2448 )
2451 )
2449 == 0
2452 == 0
2450 ):
2453 ):
2451 actionbyfile[f] = (
2454 actionbyfile[f] = (
2452 ACTION_GET,
2455 ACTION_GET,
2453 (flags, False),
2456 (flags, False),
2454 b'prompt recreating',
2457 b'prompt recreating',
2455 )
2458 )
2456 else:
2459 else:
2457 del actionbyfile[f]
2460 del actionbyfile[f]
2458
2461
2459 # Convert to dictionary-of-lists format
2462 # Convert to dictionary-of-lists format
2460 actions = emptyactions()
2463 actions = emptyactions()
2461 for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
2464 for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
2462 if m not in actions:
2465 if m not in actions:
2463 actions[m] = []
2466 actions[m] = []
2464 actions[m].append((f, args, msg))
2467 actions[m].append((f, args, msg))
2465
2468
2466 if not util.fscasesensitive(repo.path):
2469 if not util.fscasesensitive(repo.path):
2467 # check collision between files only in p2 for clean update
2470 # check collision between files only in p2 for clean update
2468 if not branchmerge and (
2471 if not branchmerge and (
2469 force or not wc.dirty(missing=True, branch=False)
2472 force or not wc.dirty(missing=True, branch=False)
2470 ):
2473 ):
2471 _checkcollision(repo, p2.manifest(), None)
2474 _checkcollision(repo, p2.manifest(), None)
2472 else:
2475 else:
2473 _checkcollision(repo, wc.manifest(), actions)
2476 _checkcollision(repo, wc.manifest(), actions)
2474
2477
2475 # divergent renames
2478 # divergent renames
2476 for f, fl in sorted(pycompat.iteritems(diverge)):
2479 for f, fl in sorted(pycompat.iteritems(diverge)):
2477 repo.ui.warn(
2480 repo.ui.warn(
2478 _(
2481 _(
2479 b"note: possible conflict - %s was renamed "
2482 b"note: possible conflict - %s was renamed "
2480 b"multiple times to:\n"
2483 b"multiple times to:\n"
2481 )
2484 )
2482 % f
2485 % f
2483 )
2486 )
2484 for nf in sorted(fl):
2487 for nf in sorted(fl):
2485 repo.ui.warn(b" %s\n" % nf)
2488 repo.ui.warn(b" %s\n" % nf)
2486
2489
2487 # rename and delete
2490 # rename and delete
2488 for f, fl in sorted(pycompat.iteritems(renamedelete)):
2491 for f, fl in sorted(pycompat.iteritems(renamedelete)):
2489 repo.ui.warn(
2492 repo.ui.warn(
2490 _(
2493 _(
2491 b"note: possible conflict - %s was deleted "
2494 b"note: possible conflict - %s was deleted "
2492 b"and renamed to:\n"
2495 b"and renamed to:\n"
2493 )
2496 )
2494 % f
2497 % f
2495 )
2498 )
2496 for nf in sorted(fl):
2499 for nf in sorted(fl):
2497 repo.ui.warn(b" %s\n" % nf)
2500 repo.ui.warn(b" %s\n" % nf)
2498
2501
2499 ### apply phase
2502 ### apply phase
2500 if not branchmerge: # just jump to the new rev
2503 if not branchmerge: # just jump to the new rev
2501 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
2504 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
2502 # If we're doing a partial update, we need to skip updating
2505 # If we're doing a partial update, we need to skip updating
2503 # the dirstate.
2506 # the dirstate.
2504 always = matcher is None or matcher.always()
2507 always = matcher is None or matcher.always()
2505 updatedirstate = always and not wc.isinmemory()
2508 updatedirstate = always and not wc.isinmemory()
2506 if updatedirstate:
2509 if updatedirstate:
2507 repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
2510 repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
2508 # note that we're in the middle of an update
2511 # note that we're in the middle of an update
2509 repo.vfs.write(b'updatestate', p2.hex())
2512 repo.vfs.write(b'updatestate', p2.hex())
2510
2513
2511 # Advertise fsmonitor when its presence could be useful.
2514 # Advertise fsmonitor when its presence could be useful.
2512 #
2515 #
2513 # We only advertise when performing an update from an empty working
2516 # We only advertise when performing an update from an empty working
2514 # directory. This typically only occurs during initial clone.
2517 # directory. This typically only occurs during initial clone.
2515 #
2518 #
2516 # We give users a mechanism to disable the warning in case it is
2519 # We give users a mechanism to disable the warning in case it is
2517 # annoying.
2520 # annoying.
2518 #
2521 #
2519 # We only allow on Linux and MacOS because that's where fsmonitor is
2522 # We only allow on Linux and MacOS because that's where fsmonitor is
2520 # considered stable.
2523 # considered stable.
2521 fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
2524 fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
2522 fsmonitorthreshold = repo.ui.configint(
2525 fsmonitorthreshold = repo.ui.configint(
2523 b'fsmonitor', b'warn_update_file_count'
2526 b'fsmonitor', b'warn_update_file_count'
2524 )
2527 )
2525 try:
2528 try:
2526 # avoid cycle: extensions -> cmdutil -> merge
2529 # avoid cycle: extensions -> cmdutil -> merge
2527 from . import extensions
2530 from . import extensions
2528
2531
2529 extensions.find(b'fsmonitor')
2532 extensions.find(b'fsmonitor')
2530 fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
2533 fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
2531 # We intentionally don't look at whether fsmonitor has disabled
2534 # We intentionally don't look at whether fsmonitor has disabled
2532 # itself because a) fsmonitor may have already printed a warning
2535 # itself because a) fsmonitor may have already printed a warning
2533 # b) we only care about the config state here.
2536 # b) we only care about the config state here.
2534 except KeyError:
2537 except KeyError:
2535 fsmonitorenabled = False
2538 fsmonitorenabled = False
2536
2539
2537 if (
2540 if (
2538 fsmonitorwarning
2541 fsmonitorwarning
2539 and not fsmonitorenabled
2542 and not fsmonitorenabled
2540 and p1.node() == nullid
2543 and p1.node() == nullid
2541 and len(actions[ACTION_GET]) >= fsmonitorthreshold
2544 and len(actions[ACTION_GET]) >= fsmonitorthreshold
2542 and pycompat.sysplatform.startswith((b'linux', b'darwin'))
2545 and pycompat.sysplatform.startswith((b'linux', b'darwin'))
2543 ):
2546 ):
2544 repo.ui.warn(
2547 repo.ui.warn(
2545 _(
2548 _(
2546 b'(warning: large working directory being used without '
2549 b'(warning: large working directory being used without '
2547 b'fsmonitor enabled; enable fsmonitor to improve performance; '
2550 b'fsmonitor enabled; enable fsmonitor to improve performance; '
2548 b'see "hg help -e fsmonitor")\n'
2551 b'see "hg help -e fsmonitor")\n'
2549 )
2552 )
2550 )
2553 )
2551
2554
2552 wantfiledata = updatedirstate and not branchmerge
2555 wantfiledata = updatedirstate and not branchmerge
2553 stats, getfiledata = applyupdates(
2556 stats, getfiledata = applyupdates(
2554 repo, actions, wc, p2, overwrite, wantfiledata, labels=labels
2557 repo, actions, wc, p2, overwrite, wantfiledata, labels=labels
2555 )
2558 )
2556
2559
2557 if updatedirstate:
2560 if updatedirstate:
2558 with repo.dirstate.parentchange():
2561 with repo.dirstate.parentchange():
2559 repo.setparents(fp1, fp2)
2562 repo.setparents(fp1, fp2)
2560 recordupdates(repo, actions, branchmerge, getfiledata)
2563 recordupdates(repo, actions, branchmerge, getfiledata)
2561 # update completed, clear state
2564 # update completed, clear state
2562 util.unlink(repo.vfs.join(b'updatestate'))
2565 util.unlink(repo.vfs.join(b'updatestate'))
2563
2566
2564 if not branchmerge:
2567 if not branchmerge:
2565 repo.dirstate.setbranch(p2.branch())
2568 repo.dirstate.setbranch(p2.branch())
2566
2569
2567 # If we're updating to a location, clean up any stale temporary includes
2570 # If we're updating to a location, clean up any stale temporary includes
2568 # (ex: this happens during hg rebase --abort).
2571 # (ex: this happens during hg rebase --abort).
2569 if not branchmerge:
2572 if not branchmerge:
2570 sparse.prunetemporaryincludes(repo)
2573 sparse.prunetemporaryincludes(repo)
2571
2574
2572 if updatedirstate:
2575 if updatedirstate:
2573 repo.hook(
2576 repo.hook(
2574 b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
2577 b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
2575 )
2578 )
2576 return stats
2579 return stats
2577
2580
2578
2581
2579 def graft(
2582 def graft(
2580 repo, ctx, base, labels=None, keepparent=False, keepconflictparent=False
2583 repo, ctx, base, labels=None, keepparent=False, keepconflictparent=False
2581 ):
2584 ):
2582 """Do a graft-like merge.
2585 """Do a graft-like merge.
2583
2586
2584 This is a merge where the merge ancestor is chosen such that one
2587 This is a merge where the merge ancestor is chosen such that one
2585 or more changesets are grafted onto the current changeset. In
2588 or more changesets are grafted onto the current changeset. In
2586 addition to the merge, this fixes up the dirstate to include only
2589 addition to the merge, this fixes up the dirstate to include only
2587 a single parent (if keepparent is False) and tries to duplicate any
2590 a single parent (if keepparent is False) and tries to duplicate any
2588 renames/copies appropriately.
2591 renames/copies appropriately.
2589
2592
2590 ctx - changeset to rebase
2593 ctx - changeset to rebase
2591 base - merge base, usually ctx.p1()
2594 base - merge base, usually ctx.p1()
2592 labels - merge labels eg ['local', 'graft']
2595 labels - merge labels eg ['local', 'graft']
2593 keepparent - keep second parent if any
2596 keepparent - keep second parent if any
2594 keepconflictparent - if unresolved, keep parent used for the merge
2597 keepconflictparent - if unresolved, keep parent used for the merge
2595
2598
2596 """
2599 """
2597 # If we're grafting a descendant onto an ancestor, be sure to pass
2600 # If we're grafting a descendant onto an ancestor, be sure to pass
2598 # mergeancestor=True to update. This does two things: 1) allows the merge if
2601 # mergeancestor=True to update. This does two things: 1) allows the merge if
2599 # the destination is the same as the parent of the ctx (so we can use graft
2602 # the destination is the same as the parent of the ctx (so we can use graft
2600 # to copy commits), and 2) informs update that the incoming changes are
2603 # to copy commits), and 2) informs update that the incoming changes are
2601 # newer than the destination so it doesn't prompt about "remote changed foo
2604 # newer than the destination so it doesn't prompt about "remote changed foo
2602 # which local deleted".
2605 # which local deleted".
2603 wctx = repo[None]
2606 wctx = repo[None]
2604 pctx = wctx.p1()
2607 pctx = wctx.p1()
2605 mergeancestor = repo.changelog.isancestor(pctx.node(), ctx.node())
2608 mergeancestor = repo.changelog.isancestor(pctx.node(), ctx.node())
2606
2609
2607 stats = update(
2610 stats = update(
2608 repo,
2611 repo,
2609 ctx.node(),
2612 ctx.node(),
2610 True,
2613 True,
2611 True,
2614 True,
2612 base.node(),
2615 base.node(),
2613 mergeancestor=mergeancestor,
2616 mergeancestor=mergeancestor,
2614 labels=labels,
2617 labels=labels,
2615 )
2618 )
2616
2619
2617 if keepconflictparent and stats.unresolvedcount:
2620 if keepconflictparent and stats.unresolvedcount:
2618 pother = ctx.node()
2621 pother = ctx.node()
2619 else:
2622 else:
2620 pother = nullid
2623 pother = nullid
2621 parents = ctx.parents()
2624 parents = ctx.parents()
2622 if keepparent and len(parents) == 2 and base in parents:
2625 if keepparent and len(parents) == 2 and base in parents:
2623 parents.remove(base)
2626 parents.remove(base)
2624 pother = parents[0].node()
2627 pother = parents[0].node()
2625 # Never set both parents equal to each other
2628 # Never set both parents equal to each other
2626 if pother == pctx.node():
2629 if pother == pctx.node():
2627 pother = nullid
2630 pother = nullid
2628
2631
2629 with repo.dirstate.parentchange():
2632 with repo.dirstate.parentchange():
2630 repo.setparents(pctx.node(), pother)
2633 repo.setparents(pctx.node(), pother)
2631 repo.dirstate.write(repo.currenttransaction())
2634 repo.dirstate.write(repo.currenttransaction())
2632 # fix up dirstate for copies and renames
2635 # fix up dirstate for copies and renames
2633 copies.graftcopies(wctx, ctx, base)
2636 copies.graftcopies(wctx, ctx, base)
2634 return stats
2637 return stats
2635
2638
2636
2639
2637 def purge(
2640 def purge(
2638 repo,
2641 repo,
2639 matcher,
2642 matcher,
2640 ignored=False,
2643 ignored=False,
2641 removeemptydirs=True,
2644 removeemptydirs=True,
2642 removefiles=True,
2645 removefiles=True,
2643 abortonerror=False,
2646 abortonerror=False,
2644 noop=False,
2647 noop=False,
2645 ):
2648 ):
2646 """Purge the working directory of untracked files.
2649 """Purge the working directory of untracked files.
2647
2650
2648 ``matcher`` is a matcher configured to scan the working directory -
2651 ``matcher`` is a matcher configured to scan the working directory -
2649 potentially a subset.
2652 potentially a subset.
2650
2653
2651 ``ignored`` controls whether ignored files should also be purged.
2654 ``ignored`` controls whether ignored files should also be purged.
2652
2655
2653 ``removeemptydirs`` controls whether empty directories should be removed.
2656 ``removeemptydirs`` controls whether empty directories should be removed.
2654
2657
2655 ``removefiles`` controls whether files are removed.
2658 ``removefiles`` controls whether files are removed.
2656
2659
2657 ``abortonerror`` causes an exception to be raised if an error occurs
2660 ``abortonerror`` causes an exception to be raised if an error occurs
2658 deleting a file or directory.
2661 deleting a file or directory.
2659
2662
2660 ``noop`` controls whether to actually remove files. If not defined, actions
2663 ``noop`` controls whether to actually remove files. If not defined, actions
2661 will be taken.
2664 will be taken.
2662
2665
2663 Returns an iterable of relative paths in the working directory that were
2666 Returns an iterable of relative paths in the working directory that were
2664 or would be removed.
2667 or would be removed.
2665 """
2668 """
2666
2669
2667 def remove(removefn, path):
2670 def remove(removefn, path):
2668 try:
2671 try:
2669 removefn(path)
2672 removefn(path)
2670 except OSError:
2673 except OSError:
2671 m = _(b'%s cannot be removed') % path
2674 m = _(b'%s cannot be removed') % path
2672 if abortonerror:
2675 if abortonerror:
2673 raise error.Abort(m)
2676 raise error.Abort(m)
2674 else:
2677 else:
2675 repo.ui.warn(_(b'warning: %s\n') % m)
2678 repo.ui.warn(_(b'warning: %s\n') % m)
2676
2679
2677 # There's no API to copy a matcher. So mutate the passed matcher and
2680 # There's no API to copy a matcher. So mutate the passed matcher and
2678 # restore it when we're done.
2681 # restore it when we're done.
2679 oldtraversedir = matcher.traversedir
2682 oldtraversedir = matcher.traversedir
2680
2683
2681 res = []
2684 res = []
2682
2685
2683 try:
2686 try:
2684 if removeemptydirs:
2687 if removeemptydirs:
2685 directories = []
2688 directories = []
2686 matcher.traversedir = directories.append
2689 matcher.traversedir = directories.append
2687
2690
2688 status = repo.status(match=matcher, ignored=ignored, unknown=True)
2691 status = repo.status(match=matcher, ignored=ignored, unknown=True)
2689
2692
2690 if removefiles:
2693 if removefiles:
2691 for f in sorted(status.unknown + status.ignored):
2694 for f in sorted(status.unknown + status.ignored):
2692 if not noop:
2695 if not noop:
2693 repo.ui.note(_(b'removing file %s\n') % f)
2696 repo.ui.note(_(b'removing file %s\n') % f)
2694 remove(repo.wvfs.unlink, f)
2697 remove(repo.wvfs.unlink, f)
2695 res.append(f)
2698 res.append(f)
2696
2699
2697 if removeemptydirs:
2700 if removeemptydirs:
2698 for f in sorted(directories, reverse=True):
2701 for f in sorted(directories, reverse=True):
2699 if matcher(f) and not repo.wvfs.listdir(f):
2702 if matcher(f) and not repo.wvfs.listdir(f):
2700 if not noop:
2703 if not noop:
2701 repo.ui.note(_(b'removing directory %s\n') % f)
2704 repo.ui.note(_(b'removing directory %s\n') % f)
2702 remove(repo.wvfs.rmdir, f)
2705 remove(repo.wvfs.rmdir, f)
2703 res.append(f)
2706 res.append(f)
2704
2707
2705 return res
2708 return res
2706
2709
2707 finally:
2710 finally:
2708 matcher.traversedir = oldtraversedir
2711 matcher.traversedir = oldtraversedir
General Comments 0
You need to be logged in to leave comments. Login now