##// END OF EJS Templates
merge: start using the per-side copy dicts...
Martin von Zweigbergk -
r44661:45f0d1cd default draft
parent child Browse files
Show More
@@ -1,1165 +1,1167 b''
1 # copies.py - copy detection for Mercurial
1 # copies.py - copy detection for Mercurial
2 #
2 #
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import multiprocessing
11 import multiprocessing
12 import os
12 import os
13
13
14 from .i18n import _
14 from .i18n import _
15
15
16
16
17 from .revlogutils.flagutil import REVIDX_SIDEDATA
17 from .revlogutils.flagutil import REVIDX_SIDEDATA
18
18
19 from . import (
19 from . import (
20 error,
20 error,
21 match as matchmod,
21 match as matchmod,
22 node,
22 node,
23 pathutil,
23 pathutil,
24 pycompat,
24 pycompat,
25 util,
25 util,
26 )
26 )
27
27
28 from .revlogutils import sidedata as sidedatamod
28 from .revlogutils import sidedata as sidedatamod
29
29
30 from .utils import stringutil
30 from .utils import stringutil
31
31
32
32
33 def _filter(src, dst, t):
33 def _filter(src, dst, t):
34 """filters out invalid copies after chaining"""
34 """filters out invalid copies after chaining"""
35
35
36 # When _chain()'ing copies in 'a' (from 'src' via some other commit 'mid')
36 # When _chain()'ing copies in 'a' (from 'src' via some other commit 'mid')
37 # with copies in 'b' (from 'mid' to 'dst'), we can get the different cases
37 # with copies in 'b' (from 'mid' to 'dst'), we can get the different cases
38 # in the following table (not including trivial cases). For example, case 2
38 # in the following table (not including trivial cases). For example, case 2
39 # is where a file existed in 'src' and remained under that name in 'mid' and
39 # is where a file existed in 'src' and remained under that name in 'mid' and
40 # then was renamed between 'mid' and 'dst'.
40 # then was renamed between 'mid' and 'dst'.
41 #
41 #
42 # case src mid dst result
42 # case src mid dst result
43 # 1 x y - -
43 # 1 x y - -
44 # 2 x y y x->y
44 # 2 x y y x->y
45 # 3 x y x -
45 # 3 x y x -
46 # 4 x y z x->z
46 # 4 x y z x->z
47 # 5 - x y -
47 # 5 - x y -
48 # 6 x x y x->y
48 # 6 x x y x->y
49 #
49 #
50 # _chain() takes care of chaining the copies in 'a' and 'b', but it
50 # _chain() takes care of chaining the copies in 'a' and 'b', but it
51 # cannot tell the difference between cases 1 and 2, between 3 and 4, or
51 # cannot tell the difference between cases 1 and 2, between 3 and 4, or
52 # between 5 and 6, so it includes all cases in its result.
52 # between 5 and 6, so it includes all cases in its result.
53 # Cases 1, 3, and 5 are then removed by _filter().
53 # Cases 1, 3, and 5 are then removed by _filter().
54
54
55 for k, v in list(t.items()):
55 for k, v in list(t.items()):
56 # remove copies from files that didn't exist
56 # remove copies from files that didn't exist
57 if v not in src:
57 if v not in src:
58 del t[k]
58 del t[k]
59 # remove criss-crossed copies
59 # remove criss-crossed copies
60 elif k in src and v in dst:
60 elif k in src and v in dst:
61 del t[k]
61 del t[k]
62 # remove copies to files that were then removed
62 # remove copies to files that were then removed
63 elif k not in dst:
63 elif k not in dst:
64 del t[k]
64 del t[k]
65
65
66
66
67 def _chain(prefix, suffix):
67 def _chain(prefix, suffix):
68 """chain two sets of copies 'prefix' and 'suffix'"""
68 """chain two sets of copies 'prefix' and 'suffix'"""
69 result = prefix.copy()
69 result = prefix.copy()
70 for key, value in pycompat.iteritems(suffix):
70 for key, value in pycompat.iteritems(suffix):
71 result[key] = prefix.get(value, value)
71 result[key] = prefix.get(value, value)
72 return result
72 return result
73
73
74
74
75 def _tracefile(fctx, am, basemf):
75 def _tracefile(fctx, am, basemf):
76 """return file context that is the ancestor of fctx present in ancestor
76 """return file context that is the ancestor of fctx present in ancestor
77 manifest am
77 manifest am
78
78
79 Note: we used to try and stop after a given limit, however checking if that
79 Note: we used to try and stop after a given limit, however checking if that
80 limit is reached turned out to be very expensive. we are better off
80 limit is reached turned out to be very expensive. we are better off
81 disabling that feature."""
81 disabling that feature."""
82
82
83 for f in fctx.ancestors():
83 for f in fctx.ancestors():
84 path = f.path()
84 path = f.path()
85 if am.get(path, None) == f.filenode():
85 if am.get(path, None) == f.filenode():
86 return path
86 return path
87 if basemf and basemf.get(path, None) == f.filenode():
87 if basemf and basemf.get(path, None) == f.filenode():
88 return path
88 return path
89
89
90
90
91 def _dirstatecopies(repo, match=None):
91 def _dirstatecopies(repo, match=None):
92 ds = repo.dirstate
92 ds = repo.dirstate
93 c = ds.copies().copy()
93 c = ds.copies().copy()
94 for k in list(c):
94 for k in list(c):
95 if ds[k] not in b'anm' or (match and not match(k)):
95 if ds[k] not in b'anm' or (match and not match(k)):
96 del c[k]
96 del c[k]
97 return c
97 return c
98
98
99
99
100 def _computeforwardmissing(a, b, match=None):
100 def _computeforwardmissing(a, b, match=None):
101 """Computes which files are in b but not a.
101 """Computes which files are in b but not a.
102 This is its own function so extensions can easily wrap this call to see what
102 This is its own function so extensions can easily wrap this call to see what
103 files _forwardcopies is about to process.
103 files _forwardcopies is about to process.
104 """
104 """
105 ma = a.manifest()
105 ma = a.manifest()
106 mb = b.manifest()
106 mb = b.manifest()
107 return mb.filesnotin(ma, match=match)
107 return mb.filesnotin(ma, match=match)
108
108
109
109
110 def usechangesetcentricalgo(repo):
110 def usechangesetcentricalgo(repo):
111 """Checks if we should use changeset-centric copy algorithms"""
111 """Checks if we should use changeset-centric copy algorithms"""
112 if repo.filecopiesmode == b'changeset-sidedata':
112 if repo.filecopiesmode == b'changeset-sidedata':
113 return True
113 return True
114 readfrom = repo.ui.config(b'experimental', b'copies.read-from')
114 readfrom = repo.ui.config(b'experimental', b'copies.read-from')
115 changesetsource = (b'changeset-only', b'compatibility')
115 changesetsource = (b'changeset-only', b'compatibility')
116 return readfrom in changesetsource
116 return readfrom in changesetsource
117
117
118
118
119 def _committedforwardcopies(a, b, base, match):
119 def _committedforwardcopies(a, b, base, match):
120 """Like _forwardcopies(), but b.rev() cannot be None (working copy)"""
120 """Like _forwardcopies(), but b.rev() cannot be None (working copy)"""
121 # files might have to be traced back to the fctx parent of the last
121 # files might have to be traced back to the fctx parent of the last
122 # one-side-only changeset, but not further back than that
122 # one-side-only changeset, but not further back than that
123 repo = a._repo
123 repo = a._repo
124
124
125 if usechangesetcentricalgo(repo):
125 if usechangesetcentricalgo(repo):
126 return _changesetforwardcopies(a, b, match)
126 return _changesetforwardcopies(a, b, match)
127
127
128 debug = repo.ui.debugflag and repo.ui.configbool(b'devel', b'debug.copies')
128 debug = repo.ui.debugflag and repo.ui.configbool(b'devel', b'debug.copies')
129 dbg = repo.ui.debug
129 dbg = repo.ui.debug
130 if debug:
130 if debug:
131 dbg(b'debug.copies: looking into rename from %s to %s\n' % (a, b))
131 dbg(b'debug.copies: looking into rename from %s to %s\n' % (a, b))
132 am = a.manifest()
132 am = a.manifest()
133 basemf = None if base is None else base.manifest()
133 basemf = None if base is None else base.manifest()
134
134
135 # find where new files came from
135 # find where new files came from
136 # we currently don't try to find where old files went, too expensive
136 # we currently don't try to find where old files went, too expensive
137 # this means we can miss a case like 'hg rm b; hg cp a b'
137 # this means we can miss a case like 'hg rm b; hg cp a b'
138 cm = {}
138 cm = {}
139
139
140 # Computing the forward missing is quite expensive on large manifests, since
140 # Computing the forward missing is quite expensive on large manifests, since
141 # it compares the entire manifests. We can optimize it in the common use
141 # it compares the entire manifests. We can optimize it in the common use
142 # case of computing what copies are in a commit versus its parent (like
142 # case of computing what copies are in a commit versus its parent (like
143 # during a rebase or histedit). Note, we exclude merge commits from this
143 # during a rebase or histedit). Note, we exclude merge commits from this
144 # optimization, since the ctx.files() for a merge commit is not correct for
144 # optimization, since the ctx.files() for a merge commit is not correct for
145 # this comparison.
145 # this comparison.
146 forwardmissingmatch = match
146 forwardmissingmatch = match
147 if b.p1() == a and b.p2().node() == node.nullid:
147 if b.p1() == a and b.p2().node() == node.nullid:
148 filesmatcher = matchmod.exact(b.files())
148 filesmatcher = matchmod.exact(b.files())
149 forwardmissingmatch = matchmod.intersectmatchers(match, filesmatcher)
149 forwardmissingmatch = matchmod.intersectmatchers(match, filesmatcher)
150 missing = _computeforwardmissing(a, b, match=forwardmissingmatch)
150 missing = _computeforwardmissing(a, b, match=forwardmissingmatch)
151
151
152 ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)
152 ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)
153
153
154 if debug:
154 if debug:
155 dbg(b'debug.copies: missing files to search: %d\n' % len(missing))
155 dbg(b'debug.copies: missing files to search: %d\n' % len(missing))
156
156
157 for f in sorted(missing):
157 for f in sorted(missing):
158 if debug:
158 if debug:
159 dbg(b'debug.copies: tracing file: %s\n' % f)
159 dbg(b'debug.copies: tracing file: %s\n' % f)
160 fctx = b[f]
160 fctx = b[f]
161 fctx._ancestrycontext = ancestrycontext
161 fctx._ancestrycontext = ancestrycontext
162
162
163 if debug:
163 if debug:
164 start = util.timer()
164 start = util.timer()
165 opath = _tracefile(fctx, am, basemf)
165 opath = _tracefile(fctx, am, basemf)
166 if opath:
166 if opath:
167 if debug:
167 if debug:
168 dbg(b'debug.copies: rename of: %s\n' % opath)
168 dbg(b'debug.copies: rename of: %s\n' % opath)
169 cm[f] = opath
169 cm[f] = opath
170 if debug:
170 if debug:
171 dbg(
171 dbg(
172 b'debug.copies: time: %f seconds\n'
172 b'debug.copies: time: %f seconds\n'
173 % (util.timer() - start)
173 % (util.timer() - start)
174 )
174 )
175 return cm
175 return cm
176
176
177
177
178 def _revinfogetter(repo):
178 def _revinfogetter(repo):
179 """return a function that return multiple data given a <rev>"i
179 """return a function that return multiple data given a <rev>"i
180
180
181 * p1: revision number of first parent
181 * p1: revision number of first parent
182 * p2: revision number of first parent
182 * p2: revision number of first parent
183 * p1copies: mapping of copies from p1
183 * p1copies: mapping of copies from p1
184 * p2copies: mapping of copies from p2
184 * p2copies: mapping of copies from p2
185 * removed: a list of removed files
185 * removed: a list of removed files
186 """
186 """
187 cl = repo.changelog
187 cl = repo.changelog
188 parents = cl.parentrevs
188 parents = cl.parentrevs
189
189
190 if repo.filecopiesmode == b'changeset-sidedata':
190 if repo.filecopiesmode == b'changeset-sidedata':
191 changelogrevision = cl.changelogrevision
191 changelogrevision = cl.changelogrevision
192 flags = cl.flags
192 flags = cl.flags
193
193
194 # A small cache to avoid doing the work twice for merges
194 # A small cache to avoid doing the work twice for merges
195 #
195 #
196 # In the vast majority of cases, if we ask information for a revision
196 # In the vast majority of cases, if we ask information for a revision
197 # about 1 parent, we'll later ask it for the other. So it make sense to
197 # about 1 parent, we'll later ask it for the other. So it make sense to
198 # keep the information around when reaching the first parent of a merge
198 # keep the information around when reaching the first parent of a merge
199 # and dropping it after it was provided for the second parents.
199 # and dropping it after it was provided for the second parents.
200 #
200 #
201 # It exists cases were only one parent of the merge will be walked. It
201 # It exists cases were only one parent of the merge will be walked. It
202 # happens when the "destination" the copy tracing is descendant from a
202 # happens when the "destination" the copy tracing is descendant from a
203 # new root, not common with the "source". In that case, we will only walk
203 # new root, not common with the "source". In that case, we will only walk
204 # through merge parents that are descendant of changesets common
204 # through merge parents that are descendant of changesets common
205 # between "source" and "destination".
205 # between "source" and "destination".
206 #
206 #
207 # With the current case implementation if such changesets have a copy
207 # With the current case implementation if such changesets have a copy
208 # information, we'll keep them in memory until the end of
208 # information, we'll keep them in memory until the end of
209 # _changesetforwardcopies. We don't expect the case to be frequent
209 # _changesetforwardcopies. We don't expect the case to be frequent
210 # enough to matters.
210 # enough to matters.
211 #
211 #
212 # In addition, it would be possible to reach pathological case, were
212 # In addition, it would be possible to reach pathological case, were
213 # many first parent are met before any second parent is reached. In
213 # many first parent are met before any second parent is reached. In
214 # that case the cache could grow. If this even become an issue one can
214 # that case the cache could grow. If this even become an issue one can
215 # safely introduce a maximum cache size. This would trade extra CPU/IO
215 # safely introduce a maximum cache size. This would trade extra CPU/IO
216 # time to save memory.
216 # time to save memory.
217 merge_caches = {}
217 merge_caches = {}
218
218
219 def revinfo(rev):
219 def revinfo(rev):
220 p1, p2 = parents(rev)
220 p1, p2 = parents(rev)
221 if flags(rev) & REVIDX_SIDEDATA:
221 if flags(rev) & REVIDX_SIDEDATA:
222 e = merge_caches.pop(rev, None)
222 e = merge_caches.pop(rev, None)
223 if e is not None:
223 if e is not None:
224 return e
224 return e
225 c = changelogrevision(rev)
225 c = changelogrevision(rev)
226 p1copies = c.p1copies
226 p1copies = c.p1copies
227 p2copies = c.p2copies
227 p2copies = c.p2copies
228 removed = c.filesremoved
228 removed = c.filesremoved
229 if p1 != node.nullrev and p2 != node.nullrev:
229 if p1 != node.nullrev and p2 != node.nullrev:
230 # XXX some case we over cache, IGNORE
230 # XXX some case we over cache, IGNORE
231 merge_caches[rev] = (p1, p2, p1copies, p2copies, removed)
231 merge_caches[rev] = (p1, p2, p1copies, p2copies, removed)
232 else:
232 else:
233 p1copies = {}
233 p1copies = {}
234 p2copies = {}
234 p2copies = {}
235 removed = []
235 removed = []
236 return p1, p2, p1copies, p2copies, removed
236 return p1, p2, p1copies, p2copies, removed
237
237
238 else:
238 else:
239
239
240 def revinfo(rev):
240 def revinfo(rev):
241 p1, p2 = parents(rev)
241 p1, p2 = parents(rev)
242 ctx = repo[rev]
242 ctx = repo[rev]
243 p1copies, p2copies = ctx._copies
243 p1copies, p2copies = ctx._copies
244 removed = ctx.filesremoved()
244 removed = ctx.filesremoved()
245 return p1, p2, p1copies, p2copies, removed
245 return p1, p2, p1copies, p2copies, removed
246
246
247 return revinfo
247 return revinfo
248
248
249
249
250 def _changesetforwardcopies(a, b, match):
250 def _changesetforwardcopies(a, b, match):
251 if a.rev() in (node.nullrev, b.rev()):
251 if a.rev() in (node.nullrev, b.rev()):
252 return {}
252 return {}
253
253
254 repo = a.repo().unfiltered()
254 repo = a.repo().unfiltered()
255 children = {}
255 children = {}
256 revinfo = _revinfogetter(repo)
256 revinfo = _revinfogetter(repo)
257
257
258 cl = repo.changelog
258 cl = repo.changelog
259 missingrevs = cl.findmissingrevs(common=[a.rev()], heads=[b.rev()])
259 missingrevs = cl.findmissingrevs(common=[a.rev()], heads=[b.rev()])
260 mrset = set(missingrevs)
260 mrset = set(missingrevs)
261 roots = set()
261 roots = set()
262 for r in missingrevs:
262 for r in missingrevs:
263 for p in cl.parentrevs(r):
263 for p in cl.parentrevs(r):
264 if p == node.nullrev:
264 if p == node.nullrev:
265 continue
265 continue
266 if p not in children:
266 if p not in children:
267 children[p] = [r]
267 children[p] = [r]
268 else:
268 else:
269 children[p].append(r)
269 children[p].append(r)
270 if p not in mrset:
270 if p not in mrset:
271 roots.add(p)
271 roots.add(p)
272 if not roots:
272 if not roots:
273 # no common revision to track copies from
273 # no common revision to track copies from
274 return {}
274 return {}
275 min_root = min(roots)
275 min_root = min(roots)
276
276
277 from_head = set(
277 from_head = set(
278 cl.reachableroots(min_root, [b.rev()], list(roots), includepath=True)
278 cl.reachableroots(min_root, [b.rev()], list(roots), includepath=True)
279 )
279 )
280
280
281 iterrevs = set(from_head)
281 iterrevs = set(from_head)
282 iterrevs &= mrset
282 iterrevs &= mrset
283 iterrevs.update(roots)
283 iterrevs.update(roots)
284 iterrevs.remove(b.rev())
284 iterrevs.remove(b.rev())
285 revs = sorted(iterrevs)
285 revs = sorted(iterrevs)
286 return _combinechangesetcopies(revs, children, b.rev(), revinfo, match)
286 return _combinechangesetcopies(revs, children, b.rev(), revinfo, match)
287
287
288
288
289 def _combinechangesetcopies(revs, children, targetrev, revinfo, match):
289 def _combinechangesetcopies(revs, children, targetrev, revinfo, match):
290 """combine the copies information for each item of iterrevs
290 """combine the copies information for each item of iterrevs
291
291
292 revs: sorted iterable of revision to visit
292 revs: sorted iterable of revision to visit
293 children: a {parent: [children]} mapping.
293 children: a {parent: [children]} mapping.
294 targetrev: the final copies destination revision (not in iterrevs)
294 targetrev: the final copies destination revision (not in iterrevs)
295 revinfo(rev): a function that return (p1, p2, p1copies, p2copies, removed)
295 revinfo(rev): a function that return (p1, p2, p1copies, p2copies, removed)
296 match: a matcher
296 match: a matcher
297
297
298 It returns the aggregated copies information for `targetrev`.
298 It returns the aggregated copies information for `targetrev`.
299 """
299 """
300 all_copies = {}
300 all_copies = {}
301 alwaysmatch = match.always()
301 alwaysmatch = match.always()
302 for r in revs:
302 for r in revs:
303 copies = all_copies.pop(r, None)
303 copies = all_copies.pop(r, None)
304 if copies is None:
304 if copies is None:
305 # this is a root
305 # this is a root
306 copies = {}
306 copies = {}
307 for i, c in enumerate(children[r]):
307 for i, c in enumerate(children[r]):
308 p1, p2, p1copies, p2copies, removed = revinfo(c)
308 p1, p2, p1copies, p2copies, removed = revinfo(c)
309 if r == p1:
309 if r == p1:
310 parent = 1
310 parent = 1
311 childcopies = p1copies
311 childcopies = p1copies
312 else:
312 else:
313 assert r == p2
313 assert r == p2
314 parent = 2
314 parent = 2
315 childcopies = p2copies
315 childcopies = p2copies
316 if not alwaysmatch:
316 if not alwaysmatch:
317 childcopies = {
317 childcopies = {
318 dst: src for dst, src in childcopies.items() if match(dst)
318 dst: src for dst, src in childcopies.items() if match(dst)
319 }
319 }
320 newcopies = copies
320 newcopies = copies
321 if childcopies:
321 if childcopies:
322 newcopies = _chain(newcopies, childcopies)
322 newcopies = _chain(newcopies, childcopies)
323 # _chain makes a copies, we can avoid doing so in some
323 # _chain makes a copies, we can avoid doing so in some
324 # simple/linear cases.
324 # simple/linear cases.
325 assert newcopies is not copies
325 assert newcopies is not copies
326 for f in removed:
326 for f in removed:
327 if f in newcopies:
327 if f in newcopies:
328 if newcopies is copies:
328 if newcopies is copies:
329 # copy on write to avoid affecting potential other
329 # copy on write to avoid affecting potential other
330 # branches. when there are no other branches, this
330 # branches. when there are no other branches, this
331 # could be avoided.
331 # could be avoided.
332 newcopies = copies.copy()
332 newcopies = copies.copy()
333 del newcopies[f]
333 del newcopies[f]
334 othercopies = all_copies.get(c)
334 othercopies = all_copies.get(c)
335 if othercopies is None:
335 if othercopies is None:
336 all_copies[c] = newcopies
336 all_copies[c] = newcopies
337 else:
337 else:
338 # we are the second parent to work on c, we need to merge our
338 # we are the second parent to work on c, we need to merge our
339 # work with the other.
339 # work with the other.
340 #
340 #
341 # Unlike when copies are stored in the filelog, we consider
341 # Unlike when copies are stored in the filelog, we consider
342 # it a copy even if the destination already existed on the
342 # it a copy even if the destination already existed on the
343 # other branch. It's simply too expensive to check if the
343 # other branch. It's simply too expensive to check if the
344 # file existed in the manifest.
344 # file existed in the manifest.
345 #
345 #
346 # In case of conflict, parent 1 take precedence over parent 2.
346 # In case of conflict, parent 1 take precedence over parent 2.
347 # This is an arbitrary choice made anew when implementing
347 # This is an arbitrary choice made anew when implementing
348 # changeset based copies. It was made without regards with
348 # changeset based copies. It was made without regards with
349 # potential filelog related behavior.
349 # potential filelog related behavior.
350 if parent == 1:
350 if parent == 1:
351 othercopies.update(newcopies)
351 othercopies.update(newcopies)
352 else:
352 else:
353 newcopies.update(othercopies)
353 newcopies.update(othercopies)
354 all_copies[c] = newcopies
354 all_copies[c] = newcopies
355 return all_copies[targetrev]
355 return all_copies[targetrev]
356
356
357
357
358 def _forwardcopies(a, b, base=None, match=None):
358 def _forwardcopies(a, b, base=None, match=None):
359 """find {dst@b: src@a} copy mapping where a is an ancestor of b"""
359 """find {dst@b: src@a} copy mapping where a is an ancestor of b"""
360
360
361 if base is None:
361 if base is None:
362 base = a
362 base = a
363 match = a.repo().narrowmatch(match)
363 match = a.repo().narrowmatch(match)
364 # check for working copy
364 # check for working copy
365 if b.rev() is None:
365 if b.rev() is None:
366 cm = _committedforwardcopies(a, b.p1(), base, match)
366 cm = _committedforwardcopies(a, b.p1(), base, match)
367 # combine copies from dirstate if necessary
367 # combine copies from dirstate if necessary
368 copies = _chain(cm, _dirstatecopies(b._repo, match))
368 copies = _chain(cm, _dirstatecopies(b._repo, match))
369 else:
369 else:
370 copies = _committedforwardcopies(a, b, base, match)
370 copies = _committedforwardcopies(a, b, base, match)
371 return copies
371 return copies
372
372
373
373
374 def _backwardrenames(a, b, match):
374 def _backwardrenames(a, b, match):
375 if a._repo.ui.config(b'experimental', b'copytrace') == b'off':
375 if a._repo.ui.config(b'experimental', b'copytrace') == b'off':
376 return {}
376 return {}
377
377
378 # Even though we're not taking copies into account, 1:n rename situations
378 # Even though we're not taking copies into account, 1:n rename situations
379 # can still exist (e.g. hg cp a b; hg mv a c). In those cases we
379 # can still exist (e.g. hg cp a b; hg mv a c). In those cases we
380 # arbitrarily pick one of the renames.
380 # arbitrarily pick one of the renames.
381 # We don't want to pass in "match" here, since that would filter
381 # We don't want to pass in "match" here, since that would filter
382 # the destination by it. Since we're reversing the copies, we want
382 # the destination by it. Since we're reversing the copies, we want
383 # to filter the source instead.
383 # to filter the source instead.
384 f = _forwardcopies(b, a)
384 f = _forwardcopies(b, a)
385 r = {}
385 r = {}
386 for k, v in sorted(pycompat.iteritems(f)):
386 for k, v in sorted(pycompat.iteritems(f)):
387 if match and not match(v):
387 if match and not match(v):
388 continue
388 continue
389 # remove copies
389 # remove copies
390 if v in a:
390 if v in a:
391 continue
391 continue
392 r[v] = k
392 r[v] = k
393 return r
393 return r
394
394
395
395
396 def pathcopies(x, y, match=None):
396 def pathcopies(x, y, match=None):
397 """find {dst@y: src@x} copy mapping for directed compare"""
397 """find {dst@y: src@x} copy mapping for directed compare"""
398 repo = x._repo
398 repo = x._repo
399 debug = repo.ui.debugflag and repo.ui.configbool(b'devel', b'debug.copies')
399 debug = repo.ui.debugflag and repo.ui.configbool(b'devel', b'debug.copies')
400 if debug:
400 if debug:
401 repo.ui.debug(
401 repo.ui.debug(
402 b'debug.copies: searching copies from %s to %s\n' % (x, y)
402 b'debug.copies: searching copies from %s to %s\n' % (x, y)
403 )
403 )
404 if x == y or not x or not y:
404 if x == y or not x or not y:
405 return {}
405 return {}
406 a = y.ancestor(x)
406 a = y.ancestor(x)
407 if a == x:
407 if a == x:
408 if debug:
408 if debug:
409 repo.ui.debug(b'debug.copies: search mode: forward\n')
409 repo.ui.debug(b'debug.copies: search mode: forward\n')
410 if y.rev() is None and x == y.p1():
410 if y.rev() is None and x == y.p1():
411 # short-circuit to avoid issues with merge states
411 # short-circuit to avoid issues with merge states
412 return _dirstatecopies(repo, match)
412 return _dirstatecopies(repo, match)
413 copies = _forwardcopies(x, y, match=match)
413 copies = _forwardcopies(x, y, match=match)
414 elif a == y:
414 elif a == y:
415 if debug:
415 if debug:
416 repo.ui.debug(b'debug.copies: search mode: backward\n')
416 repo.ui.debug(b'debug.copies: search mode: backward\n')
417 copies = _backwardrenames(x, y, match=match)
417 copies = _backwardrenames(x, y, match=match)
418 else:
418 else:
419 if debug:
419 if debug:
420 repo.ui.debug(b'debug.copies: search mode: combined\n')
420 repo.ui.debug(b'debug.copies: search mode: combined\n')
421 base = None
421 base = None
422 if a.rev() != node.nullrev:
422 if a.rev() != node.nullrev:
423 base = x
423 base = x
424 copies = _chain(
424 copies = _chain(
425 _backwardrenames(x, a, match=match),
425 _backwardrenames(x, a, match=match),
426 _forwardcopies(a, y, base, match=match),
426 _forwardcopies(a, y, base, match=match),
427 )
427 )
428 _filter(x, y, copies)
428 _filter(x, y, copies)
429 return copies
429 return copies
430
430
431
431
432 def mergecopies(repo, c1, c2, base):
432 def mergecopies(repo, c1, c2, base):
433 """
433 """
434 Finds moves and copies between context c1 and c2 that are relevant for
434 Finds moves and copies between context c1 and c2 that are relevant for
435 merging. 'base' will be used as the merge base.
435 merging. 'base' will be used as the merge base.
436
436
437 Copytracing is used in commands like rebase, merge, unshelve, etc to merge
437 Copytracing is used in commands like rebase, merge, unshelve, etc to merge
438 files that were moved/ copied in one merge parent and modified in another.
438 files that were moved/ copied in one merge parent and modified in another.
439 For example:
439 For example:
440
440
441 o ---> 4 another commit
441 o ---> 4 another commit
442 |
442 |
443 | o ---> 3 commit that modifies a.txt
443 | o ---> 3 commit that modifies a.txt
444 | /
444 | /
445 o / ---> 2 commit that moves a.txt to b.txt
445 o / ---> 2 commit that moves a.txt to b.txt
446 |/
446 |/
447 o ---> 1 merge base
447 o ---> 1 merge base
448
448
449 If we try to rebase revision 3 on revision 4, since there is no a.txt in
449 If we try to rebase revision 3 on revision 4, since there is no a.txt in
450 revision 4, and if user have copytrace disabled, we prints the following
450 revision 4, and if user have copytrace disabled, we prints the following
451 message:
451 message:
452
452
453 ```other changed <file> which local deleted```
453 ```other changed <file> which local deleted```
454
454
455 Returns a tuple where:
455 Returns a tuple where:
456
456
457 "branch_copies" an instance of branch_copies.
457 "branch_copies" an instance of branch_copies.
458
458
459 "diverge" is a mapping of source name -> list of destination names
459 "diverge" is a mapping of source name -> list of destination names
460 for divergent renames.
460 for divergent renames.
461
461
462 This function calls different copytracing algorithms based on config.
462 This function calls different copytracing algorithms based on config.
463 """
463 """
464 # avoid silly behavior for update from empty dir
464 # avoid silly behavior for update from empty dir
465 if not c1 or not c2 or c1 == c2:
465 if not c1 or not c2 or c1 == c2:
466 return branch_copies(), {}
466 return branch_copies(), branch_copies(), {}
467
467
468 narrowmatch = c1.repo().narrowmatch()
468 narrowmatch = c1.repo().narrowmatch()
469
469
470 # avoid silly behavior for parent -> working dir
470 # avoid silly behavior for parent -> working dir
471 if c2.node() is None and c1.node() == repo.dirstate.p1():
471 if c2.node() is None and c1.node() == repo.dirstate.p1():
472 return branch_copies(_dirstatecopies(repo, narrowmatch)), {}
472 return (
473 branch_copies(_dirstatecopies(repo, narrowmatch)),
474 branch_copies(),
475 {},
476 )
473
477
474 copytracing = repo.ui.config(b'experimental', b'copytrace')
478 copytracing = repo.ui.config(b'experimental', b'copytrace')
475 if stringutil.parsebool(copytracing) is False:
479 if stringutil.parsebool(copytracing) is False:
476 # stringutil.parsebool() returns None when it is unable to parse the
480 # stringutil.parsebool() returns None when it is unable to parse the
477 # value, so we should rely on making sure copytracing is on such cases
481 # value, so we should rely on making sure copytracing is on such cases
478 return branch_copies(), {}
482 return branch_copies(), branch_copies(), {}
479
483
480 if usechangesetcentricalgo(repo):
484 if usechangesetcentricalgo(repo):
481 # The heuristics don't make sense when we need changeset-centric algos
485 # The heuristics don't make sense when we need changeset-centric algos
482 return _fullcopytracing(repo, c1, c2, base)
486 return _fullcopytracing(repo, c1, c2, base)
483
487
484 # Copy trace disabling is explicitly below the node == p1 logic above
488 # Copy trace disabling is explicitly below the node == p1 logic above
485 # because the logic above is required for a simple copy to be kept across a
489 # because the logic above is required for a simple copy to be kept across a
486 # rebase.
490 # rebase.
487 if copytracing == b'heuristics':
491 if copytracing == b'heuristics':
488 # Do full copytracing if only non-public revisions are involved as
492 # Do full copytracing if only non-public revisions are involved as
489 # that will be fast enough and will also cover the copies which could
493 # that will be fast enough and will also cover the copies which could
490 # be missed by heuristics
494 # be missed by heuristics
491 if _isfullcopytraceable(repo, c1, base):
495 if _isfullcopytraceable(repo, c1, base):
492 return _fullcopytracing(repo, c1, c2, base)
496 return _fullcopytracing(repo, c1, c2, base)
493 return _heuristicscopytracing(repo, c1, c2, base)
497 return _heuristicscopytracing(repo, c1, c2, base)
494 else:
498 else:
495 return _fullcopytracing(repo, c1, c2, base)
499 return _fullcopytracing(repo, c1, c2, base)
496
500
497
501
def _isfullcopytraceable(repo, c1, base):
    """ Checks that if base, source and destination are all no-public branches,
    if yes let's use the full copytrace algorithm for increased capabilities
    since it will be fast enough.

    `experimental.copytrace.sourcecommitlimit` can be used to set a limit for
    number of changesets from c1 to base such that if number of changesets are
    more than the limit, full copytracing algorithm won't be used.
    """
    # A workingctx has rev() is None; fall back to its parent commit so the
    # revset query below gets a real revision number.
    if c1.rev() is None:
        c1 = c1.p1()
    if c1.mutable() and base.mutable():
        sourcecommitlimit = repo.ui.configint(
            b'experimental', b'copytrace.sourcecommitlimit'
        )
        # Count the commits between base and c1 (inclusive); full copytracing
        # is only cheap enough when this range is small.
        commits = len(repo.revs(b'%d::%d', base.rev(), c1.rev()))
        return commits < sourcecommitlimit
    return False
516
520
517
521
518 def _checksinglesidecopies(
522 def _checksinglesidecopies(
519 src, dsts1, m1, m2, mb, c2, base, copy, renamedelete
523 src, dsts1, m1, m2, mb, c2, base, copy, renamedelete
520 ):
524 ):
521 if src not in m2:
525 if src not in m2:
522 # deleted on side 2
526 # deleted on side 2
523 if src not in m1:
527 if src not in m1:
524 # renamed on side 1, deleted on side 2
528 # renamed on side 1, deleted on side 2
525 renamedelete[src] = dsts1
529 renamedelete[src] = dsts1
526 elif m2[src] != mb[src]:
530 elif m2[src] != mb[src]:
527 if not _related(c2[src], base[src]):
531 if not _related(c2[src], base[src]):
528 return
532 return
529 # modified on side 2
533 # modified on side 2
530 for dst in dsts1:
534 for dst in dsts1:
531 if dst not in m2:
535 if dst not in m2:
532 # dst not added on side 2 (handle as regular
536 # dst not added on side 2 (handle as regular
533 # "both created" case in manifestmerge otherwise)
537 # "both created" case in manifestmerge otherwise)
534 copy[dst] = src
538 copy[dst] = src
535
539
536
540
class branch_copies(object):
    """Information about copies made on one side of a merge/graft.

    "copy" is a mapping from destination name -> source name,
    where source is in c1 and destination is in c2 or vice-versa.

    "movewithdir" is a mapping from source name -> destination name,
    where the file at source present in one context but not the other
    needs to be moved to destination by the merge process, because the
    other context moved the directory it is in.

    "renamedelete" is a mapping of source name -> list of destination
    names for files deleted in c1 that were renamed in c2 or vice-versa.

    "dirmove" is a mapping of detected source dir -> destination dir renames.
    This is needed for handling changes to new files previously grafted into
    renamed directories.
    """

    def __init__(
        self, copy=None, renamedelete=None, dirmove=None, movewithdir=None
    ):
        # Use fresh dicts for omitted arguments (never a shared mutable
        # default) so each instance owns its own mappings.
        self.copy = {} if copy is None else copy
        self.renamedelete = {} if renamedelete is None else renamedelete
        self.dirmove = {} if dirmove is None else dirmove
        self.movewithdir = {} if movewithdir is None else movewithdir
563
567
564
568
def _fullcopytracing(repo, c1, c2, base):
    """ The full copytracing algorithm which finds all the new files that were
    added from merge base up to the top commit and for each file it checks if
    this file was copied from another file.

    This is pretty slow when a lot of changesets are involved but will track all
    the copies.

    Returns a (branch_copies1, branch_copies2, diverge) triple: per-side copy
    information plus a mapping of divergent renames.
    """
    m1 = c1.manifest()
    m2 = c2.manifest()
    mb = base.manifest()

    copies1 = pathcopies(base, c1)
    copies2 = pathcopies(base, c2)

    if not (copies1 or copies2):
        return branch_copies(), branch_copies(), {}

    # Invert the dst->src mappings so we can reason per copy *source*.
    inversecopies1 = {}
    inversecopies2 = {}
    for dst, src in copies1.items():
        inversecopies1.setdefault(src, []).append(dst)
    for dst, src in copies2.items():
        inversecopies2.setdefault(src, []).append(dst)

    copy1 = {}
    copy2 = {}
    diverge = {}
    renamedelete1 = {}
    renamedelete2 = {}
    allsources = set(inversecopies1) | set(inversecopies2)
    for src in allsources:
        dsts1 = inversecopies1.get(src)
        dsts2 = inversecopies2.get(src)
        if dsts1 and dsts2:
            # copied/renamed on both sides
            if src not in m1 and src not in m2:
                # renamed on both sides
                dsts1 = set(dsts1)
                dsts2 = set(dsts2)
                # If there's some overlap in the rename destinations, we
                # consider it not divergent. For example, if side 1 copies 'a'
                # to 'b' and 'c' and deletes 'a', and side 2 copies 'a' to 'c'
                # and 'd' and deletes 'a'.
                if dsts1 & dsts2:
                    for dst in dsts1 & dsts2:
                        copy1[dst] = src
                        copy2[dst] = src
                else:
                    diverge[src] = sorted(dsts1 | dsts2)
            elif src in m1 and src in m2:
                # copied on both sides
                dsts1 = set(dsts1)
                dsts2 = set(dsts2)
                for dst in dsts1 & dsts2:
                    copy1[dst] = src
                    copy2[dst] = src
            # TODO: Handle cases where it was renamed on one side and copied
            # on the other side
        elif dsts1:
            # copied/renamed only on side 1
            _checksinglesidecopies(
                src, dsts1, m1, m2, mb, c2, base, copy1, renamedelete1
            )
        elif dsts2:
            # copied/renamed only on side 2
            _checksinglesidecopies(
                src, dsts2, m2, m1, mb, c1, base, copy2, renamedelete2
            )

    # find interesting file sets from manifests
    addedinm1 = m1.filesnotin(mb, repo.narrowmatch())
    addedinm2 = m2.filesnotin(mb, repo.narrowmatch())
    u1 = sorted(addedinm1 - addedinm2)
    u2 = sorted(addedinm2 - addedinm1)

    header = b"  unmatched files in %s"
    if u1:
        repo.ui.debug(b"%s:\n   %s\n" % (header % b'local', b"\n   ".join(u1)))
    if u2:
        repo.ui.debug(b"%s:\n   %s\n" % (header % b'other', b"\n   ".join(u2)))

    if repo.ui.debugflag:
        renamedeleteset = set()
        divergeset = set()
        for dsts in diverge.values():
            divergeset.update(dsts)
        for dsts in renamedelete1.values():
            renamedeleteset.update(dsts)
        for dsts in renamedelete2.values():
            renamedeleteset.update(dsts)

        repo.ui.debug(
            b"  all copies found (* = to merge, ! = divergent, "
            b"% = renamed and deleted):\n"
        )
        # The side labels must be bytes: they are interpolated into a bytes
        # format string below, and bytes %-formatting rejects str on Python 3.
        for side, copies in ((b"local", copies1), (b"remote", copies2)):
            if not copies:
                continue
            repo.ui.debug(b"   on %s side:\n" % side)
            for f in sorted(copies):
                note = b""
                if f in copy1 or f in copy2:
                    note += b"*"
                if f in divergeset:
                    note += b"!"
                if f in renamedeleteset:
                    note += b"%"
                repo.ui.debug(
                    b"   src: '%s' -> dst: '%s' %s\n" % (copies[f], f, note)
                )
        del renamedeleteset
        del divergeset

    repo.ui.debug(b"  checking for directory renames\n")

    dirmove1, movewithdir2 = _dir_renames(repo, c1, copy1, copies1, u2)
    dirmove2, movewithdir1 = _dir_renames(repo, c2, copy2, copies2, u1)

    # Package each side's results separately (per-side copy dicts).
    branch_copies1 = branch_copies(copy1, renamedelete1, dirmove1, movewithdir1)
    branch_copies2 = branch_copies(copy2, renamedelete2, dirmove2, movewithdir2)

    return branch_copies1, branch_copies2, diverge
690
692
691
693
def _dir_renames(repo, ctx, copy, fullcopy, addedfiles):
    """Finds moved directories and files that should move with them.

    ctx: the context for one of the sides
    copy: files copied on the same side (as ctx)
    fullcopy: files copied on the same side (as ctx), including those that
    merge.manifestmerge() won't care about
    addedfiles: added files on the other side (compared to ctx)

    Returns a (dirmove, movewithdir) pair of dicts; both empty when no whole
    directory was detected as moved.
    """
    # generate a directory move map
    d = ctx.dirs()
    invalid = set()
    dirmove = {}

    # examine each file copy for a potential directory move, which is
    # when all the files in a directory are moved to a new directory
    for dst, src in pycompat.iteritems(fullcopy):
        dsrc, ddst = pathutil.dirname(src), pathutil.dirname(dst)
        if dsrc in invalid:
            # already seen to be uninteresting
            continue
        elif dsrc in d and ddst in d:
            # directory wasn't entirely moved locally
            invalid.add(dsrc)
        elif dsrc in dirmove and dirmove[dsrc] != ddst:
            # files from the same directory moved to two different places
            invalid.add(dsrc)
        else:
            # looks good so far
            dirmove[dsrc] = ddst

    for i in invalid:
        if i in dirmove:
            del dirmove[i]
    del d, invalid

    if not dirmove:
        return {}, {}

    # Normalize to trailing-slash form so prefix matching below is exact.
    dirmove = {k + b"/": v + b"/" for k, v in pycompat.iteritems(dirmove)}

    for d in dirmove:
        repo.ui.debug(
            b"   discovered dir src: '%s' -> dst: '%s'\n" % (d, dirmove[d])
        )

    movewithdir = {}
    # check unaccounted nonoverlapping files against directory moves
    for f in addedfiles:
        if f not in fullcopy:
            for d in dirmove:
                if f.startswith(d):
                    # new file added in a directory that was moved, move it
                    df = dirmove[d] + f[len(d) :]
                    if df not in copy:
                        movewithdir[f] = df
                        repo.ui.debug(
                            b"   pending file src: '%s' -> dst: '%s'\n"
                            % (f, df)
                        )
                    break

    return dirmove, movewithdir
755
757
756
758
def _heuristicscopytracing(repo, c1, c2, base):
    """ Fast copytracing using filename heuristics

    Assumes that moves or renames are of following two types:

    1) Inside a directory only (same directory name but different filenames)
    2) Move from one directory to another
       (same filenames but different directory names)

    Works only when there are no merge commits in the "source branch".
    Source branch is commits from base up to c2 not including base.

    If merge is involved it fallbacks to _fullcopytracing().

    Can be used by setting the following config:

        [experimental]
        copytrace = heuristics

    In some cases the copy/move candidates found by heuristics can be very large
    in number and that will make the algorithm slow. The number of possible
    candidates to check can be limited by using the config
    `experimental.copytrace.movecandidateslimit` which defaults to 100.
    """

    if c1.rev() is None:
        c1 = c1.p1()
    if c2.rev() is None:
        c2 = c2.p1()

    changedfiles = set()
    m1 = c1.manifest()
    if not repo.revs(b'%d::%d', base.rev(), c2.rev()):
        # If base is not in c2 branch, we switch to fullcopytracing
        repo.ui.debug(
            b"switching to full copytracing as base is not "
            b"an ancestor of c2\n"
        )
        return _fullcopytracing(repo, c1, c2, base)

    ctx = c2
    while ctx != base:
        if len(ctx.parents()) == 2:
            # To keep things simple let's not handle merges
            repo.ui.debug(b"switching to full copytracing because of merges\n")
            return _fullcopytracing(repo, c1, c2, base)
        changedfiles.update(ctx.files())
        ctx = ctx.p1()

    copies2 = {}
    cp = _forwardcopies(base, c2)
    for dst, src in pycompat.iteritems(cp):
        if src in m1:
            copies2[dst] = src

    # file is missing if it isn't present in the destination, but is present in
    # the base and present in the source.
    # Presence in the base is important to exclude added files, presence in the
    # source is important to exclude removed files.
    filt = lambda f: f not in m1 and f in base and f in c2
    missingfiles = [f for f in changedfiles if filt(f)]

    copies1 = {}
    if missingfiles:
        # Index the files added on side 1 by basename and by directory so
        # candidate moves can be looked up cheaply.
        basenametofilename = collections.defaultdict(list)
        dirnametofilename = collections.defaultdict(list)

        for f in m1.filesnotin(base.manifest()):
            basename = os.path.basename(f)
            dirname = os.path.dirname(f)
            basenametofilename[basename].append(f)
            dirnametofilename[dirname].append(f)

        # we can have a lot of candidates which can slow down the heuristics
        # config value to limit the number of candidates moves to check
        # (read once here: the value is loop-invariant)
        maxcandidates = repo.ui.configint(
            b'experimental', b'copytrace.movecandidateslimit'
        )

        for f in missingfiles:
            basename = os.path.basename(f)
            dirname = os.path.dirname(f)
            samebasename = basenametofilename[basename]
            samedirname = dirnametofilename[dirname]
            movecandidates = samebasename + samedirname
            # f is guaranteed to be present in c2, that's why
            # c2.filectx(f) won't fail
            f2 = c2.filectx(f)

            if len(movecandidates) > maxcandidates:
                repo.ui.status(
                    _(
                        b"skipping copytracing for '%s', more "
                        b"candidates than the limit: %d\n"
                    )
                    % (f, len(movecandidates))
                )
                continue

            for candidate in movecandidates:
                f1 = c1.filectx(candidate)
                if _related(f1, f2):
                    # if there are a few related copies then we'll merge
                    # changes into all of them. This matches the behaviour
                    # of upstream copytracing
                    copies1[candidate] = f

    return branch_copies(copies1), branch_copies(copies2), {}
864
866
865
867
866 def _related(f1, f2):
868 def _related(f1, f2):
867 """return True if f1 and f2 filectx have a common ancestor
869 """return True if f1 and f2 filectx have a common ancestor
868
870
869 Walk back to common ancestor to see if the two files originate
871 Walk back to common ancestor to see if the two files originate
870 from the same file. Since workingfilectx's rev() is None it messes
872 from the same file. Since workingfilectx's rev() is None it messes
871 up the integer comparison logic, hence the pre-step check for
873 up the integer comparison logic, hence the pre-step check for
872 None (f1 and f2 can only be workingfilectx's initially).
874 None (f1 and f2 can only be workingfilectx's initially).
873 """
875 """
874
876
875 if f1 == f2:
877 if f1 == f2:
876 return True # a match
878 return True # a match
877
879
878 g1, g2 = f1.ancestors(), f2.ancestors()
880 g1, g2 = f1.ancestors(), f2.ancestors()
879 try:
881 try:
880 f1r, f2r = f1.linkrev(), f2.linkrev()
882 f1r, f2r = f1.linkrev(), f2.linkrev()
881
883
882 if f1r is None:
884 if f1r is None:
883 f1 = next(g1)
885 f1 = next(g1)
884 if f2r is None:
886 if f2r is None:
885 f2 = next(g2)
887 f2 = next(g2)
886
888
887 while True:
889 while True:
888 f1r, f2r = f1.linkrev(), f2.linkrev()
890 f1r, f2r = f1.linkrev(), f2.linkrev()
889 if f1r > f2r:
891 if f1r > f2r:
890 f1 = next(g1)
892 f1 = next(g1)
891 elif f2r > f1r:
893 elif f2r > f1r:
892 f2 = next(g2)
894 f2 = next(g2)
893 else: # f1 and f2 point to files in the same linkrev
895 else: # f1 and f2 point to files in the same linkrev
894 return f1 == f2 # true if they point to the same file
896 return f1 == f2 # true if they point to the same file
895 except StopIteration:
897 except StopIteration:
896 return False
898 return False
897
899
898
900
def graftcopies(wctx, ctx, base):
    """reproduce copies between base and ctx in the wctx

    Unlike mergecopies(), this function will only consider copies between base
    and ctx; it will ignore copies between base and wctx. Also unlike
    mergecopies(), this function will apply copies to the working copy (instead
    of just returning information about the copies). That makes it cheaper
    (especially in the common case of base==ctx.p1()) and useful also when
    experimental.copytrace=off.

    merge.update() will have already marked most copies, but it will only
    mark copies if it thinks the source files are related (see
    merge._related()). It will also not mark copies if the file wasn't modified
    on the local side. This function adds the copies that were "missed"
    by merge.update().
    """
    new_copies = pathcopies(base, ctx)
    # Drop copies that are invalid relative to the working copy's parent
    # (e.g. source missing there) before marking them.
    _filter(wctx.p1(), wctx, new_copies)
    for dst, src in pycompat.iteritems(new_copies):
        wctx[dst].markcopied(src)
919
921
920
922
def computechangesetfilesadded(ctx):
    """return the list of files added in a changeset

    A file counts as added when it is touched by the changeset and present
    in none of the parents.
    """
    return [
        f for f in ctx.files() if not any(f in p for p in ctx.parents())
    ]
929
931
930
932
def computechangesetfilesremoved(ctx):
    """return the list of files removed in a changeset

    A file counts as removed when it is touched by the changeset but no
    longer present in it.
    """
    return [f for f in ctx.files() if f not in ctx]
939
941
940
942
def computechangesetcopies(ctx):
    """return the copies data for a changeset

    The copies data are returned as a pair of dictionnary (p1copies, p2copies).

    Each dictionnary are in the form: `{newname: oldname}`
    """
    p1copies = {}
    p2copies = {}
    p1 = ctx.p1()
    p2 = ctx.p2()
    narrowmatch = ctx._repo.narrowmatch()
    for dst in ctx.files():
        # Skip files outside the narrowspec and files not present in ctx
        # (e.g. removed files).
        if not narrowmatch(dst) or dst not in ctx:
            continue
        copied = ctx[dst].renamed()
        if not copied:
            continue
        src, srcnode = copied
        # Attribute the copy to whichever parent actually holds the source
        # file at the recorded node.
        if src in p1 and p1[src].filenode() == srcnode:
            p1copies[dst] = src
        elif src in p2 and p2[src].filenode() == srcnode:
            p2copies[dst] = src
    return p1copies, p2copies
965
967
966
968
def encodecopies(files, copies):
    """encode a dst -> src copy mapping relative to a sorted file list

    Each entry is serialized as "<index>\\0<source>" where <index> is the
    destination's position in `files`; entries are joined by newlines.
    Raises ProgrammingError if some copy destination is not in `files`.
    """
    items = []
    for i, dst in enumerate(files):
        if dst in copies:
            items.append(b'%d\0%s' % (i, copies[dst]))
    if len(items) != len(copies):
        raise error.ProgrammingError(
            b'some copy targets missing from file list'
        )
    return b"\n".join(items)
977
979
978
980
def decodecopies(files, data):
    """decode a copy mapping produced by encodecopies()

    Returns a {dst: src} dict, or None if `data` cannot be parsed (e.g. an
    extension reused the sidedata key with a different value syntax).
    """
    try:
        copies = {}
        if not data:
            return copies
        for l in data.split(b'\n'):
            strindex, src = l.split(b'\0')
            i = int(strindex)
            dst = files[i]
            copies[dst] = src
        return copies
    except (ValueError, IndexError):
        # Perhaps someone had chosen the same key name (e.g. "p1copies") and
        # used different syntax for the value.
        return None
994
996
995
997
def encodefileindices(files, subset):
    """encode a subset of `files` as newline-separated decimal indices

    Indices are emitted in `files` order regardless of the order of `subset`.
    """
    subset = set(subset)
    indices = []
    for i, f in enumerate(files):
        if f in subset:
            indices.append(b'%d' % i)
    return b'\n'.join(indices)
1003
1005
1004
1006
def decodefileindices(files, data):
    """Decode an index blob produced by ``encodefileindices``.

    Returns the corresponding entries of ``files`` as a list, or ``None``
    if ``data`` is not decodable (non-numeric or out-of-range index).
    """
    try:
        if not data:
            return []
        subset = []
        for chunk in data.split(b'\n'):
            idx = int(chunk)
            if not 0 <= idx < len(files):
                return None
            subset.append(files[idx])
        return subset
    except (ValueError, IndexError):
        # Perhaps someone had chosen the same key name (e.g. "added") and
        # used different syntax for the value.
        return None
1020
1022
1021
1023
def _getsidedata(srcrepo, rev):
    """Compute the copy-tracing sidedata entries for ``rev`` of ``srcrepo``.

    Returns a (possibly empty) dict mapping sidedata keys to their encoded
    payloads; keys whose payload would be empty are omitted.
    """
    ctx = srcrepo[rev]
    copiesinfo = computechangesetcopies(ctx)
    added = computechangesetfilesadded(ctx)
    removed = computechangesetfilesremoved(ctx)
    sidedata = {}
    if not any([copiesinfo, added, removed]):
        return sidedata
    allfiles = sorted(ctx.files())
    p1copies, p2copies = copiesinfo
    # encode everything against the sorted file list, then keep only the
    # non-empty payloads
    encoded = [
        (sidedatamod.SD_P1COPIES, encodecopies(allfiles, p1copies)),
        (sidedatamod.SD_P2COPIES, encodecopies(allfiles, p2copies)),
        (sidedatamod.SD_FILESADDED, encodefileindices(allfiles, added)),
        (sidedatamod.SD_FILESREMOVED, encodefileindices(allfiles, removed)),
    ]
    for key, value in encoded:
        if value:
            sidedata[key] = value
    return sidedata
1044
1046
1045
1047
def getsidedataadder(srcrepo, destrepo):
    """Return a sidedata companion that adds copy-tracing sidedata.

    Uses the multi-process implementation when the experimental
    ``worker.repository-upgrade`` knob is on and we are not on Windows;
    falls back to the in-process implementation otherwise.
    """
    use_w = srcrepo.ui.configbool(b'experimental', b'worker.repository-upgrade')
    if use_w and not pycompat.iswindows:
        return _get_worker_sidedata_adder(srcrepo, destrepo)
    return _get_simple_sidedata_adder(srcrepo, destrepo)
1052
1054
1053
1055
def _sidedata_worker(srcrepo, revs_queue, sidedata_queue, tokens):
    """Worker loop precomputing sidedata.

    Reads revision numbers from ``revs_queue`` and writes
    ``(rev, <sidedata-map>)`` tuples to ``sidedata_queue``.  A ``None``
    input value is the stop signal.

    The ``tokens`` semaphore bounds the number of unprocessed entries: a
    token is acquired before fetching each task and released by the
    consumer of the produced data.
    """
    while True:
        # a token is held for every task pulled, including the final
        # `None` marker; the consumer releases tokens for real results,
        # and we release the one paired with `None` ourselves below.
        tokens.acquire()
        rev = revs_queue.get()
        if rev is None:
            break
        sidedata_queue.put((rev, _getsidedata(srcrepo, rev)))
    # processing of `None` is completed, release its token.
    tokens.release()
1075
1077
1076
1078
# How many precomputed sidedata entries each worker may have in flight
# before the consumer drains some (used to size the BoundedSemaphore in
# _get_worker_sidedata_adder, bounding queue memory usage).
BUFF_PER_WORKER = 50
1078
1080
1079
1081
def _get_worker_sidedata_adder(srcrepo, destrepo):
    """The parallel version of the sidedata computation.

    This code spawns a pool of worker processes that precompute a buffer
    of sidedata before we actually need them.

    Returns a ``sidedata_companion(revlog, rev)`` callable producing a
    ``(False, (), sidedata)`` tuple for each revision.
    """
    # avoid circular import copies -> scmutil -> worker -> copies
    from . import worker

    nbworkers = worker._numworkers(srcrepo.ui)

    # bound the number of precomputed-but-unconsumed results to keep memory
    # usage in check; workers acquire a token per task and the consumer
    # releases one per result it pops.
    tokens = multiprocessing.BoundedSemaphore(nbworkers * BUFF_PER_WORKER)
    revsq = multiprocessing.Queue()
    sidedataq = multiprocessing.Queue()

    assert srcrepo.filtername is None
    # queue all tasks beforehand, revision numbers are small and it makes
    # synchronisation simpler
    #
    # Since the computation for each node can be quite expensive, the
    # overhead of using a single queue is not relevant. In practice, most
    # computations are fast but some are very expensive and dominate all the
    # other smaller costs.
    for r in srcrepo.changelog.revs():
        revsq.put(r)
    # queue the "no more tasks" markers, one per worker
    for i in range(nbworkers):
        revsq.put(None)

    allworkers = []
    for i in range(nbworkers):
        args = (srcrepo, revsq, sidedataq, tokens)
        w = multiprocessing.Process(target=_sidedata_worker, args=args)
        allworkers.append(w)
        w.start()

    # dictionary to store results for revisions higher than the one we are
    # looking for. For example, if we need the sidedata map for 42 and 43
    # arrives first, we shelve 43 for later use.
    staging = {}

    def sidedata_companion(revlog, rev):
        sidedata = {}
        # Fix: the attribute name must be a str, not bytes — getattr() (via
        # util.safehasattr) raises TypeError for bytes names on Python 3.
        # This also matches _get_simple_sidedata_adder / getsidedataremover.
        if util.safehasattr(revlog, 'filteredrevs'):  # this is a changelog
            # Is the data previously shelved ?
            sidedata = staging.pop(rev, None)
            if sidedata is None:
                # look at the queued results until we find the one we are
                # looking for (shelving the other ones)
                r, sidedata = sidedataq.get()
                while r != rev:
                    staging[r] = sidedata
                    r, sidedata = sidedataq.get()
            # one result consumed: let a worker queue a new one
            tokens.release()
        return False, (), sidedata

    return sidedata_companion
1136
1138
1137
1139
def _get_simple_sidedata_adder(srcrepo, destrepo):
    """The simple version of the sidedata computation.

    It just computes it in the same thread on request."""

    def sidedatacompanion(revlog, rev):
        # only the changelog (detected by its `filteredrevs` attribute)
        # carries copy-tracing sidedata
        if util.safehasattr(revlog, 'filteredrevs'):
            return False, (), _getsidedata(srcrepo, rev)
        return False, (), {}

    return sidedatacompanion
1150
1152
1151
1153
def getsidedataremover(srcrepo, destrepo):
    """Return a sidedata companion that strips copy-tracing sidedata."""

    def sidedatacompanion(revlog, rev):
        toremove = ()
        # only changelog revisions flagged as carrying sidedata need work
        if util.safehasattr(revlog, 'filteredrevs') and (
            revlog.flags(rev) & REVIDX_SIDEDATA
        ):
            toremove = (
                sidedatamod.SD_P1COPIES,
                sidedatamod.SD_P2COPIES,
                sidedatamod.SD_FILESADDED,
                sidedatamod.SD_FILESREMOVED,
            )
        return False, toremove, {}

    return sidedatacompanion
@@ -1,2711 +1,2721 b''
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import shutil
11 import shutil
12 import stat
12 import stat
13 import struct
13 import struct
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 bin,
18 bin,
19 hex,
19 hex,
20 modifiednodeid,
20 modifiednodeid,
21 nullhex,
21 nullhex,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 )
24 )
25 from .pycompat import delattr
25 from .pycompat import delattr
26 from .thirdparty import attr
26 from .thirdparty import attr
27 from . import (
27 from . import (
28 copies,
28 copies,
29 encoding,
29 encoding,
30 error,
30 error,
31 filemerge,
31 filemerge,
32 match as matchmod,
32 match as matchmod,
33 obsutil,
33 obsutil,
34 pathutil,
34 pathutil,
35 pycompat,
35 pycompat,
36 scmutil,
36 scmutil,
37 subrepoutil,
37 subrepoutil,
38 util,
38 util,
39 worker,
39 worker,
40 )
40 )
41 from .utils import hashutil
41 from .utils import hashutil
42
42
# Shorthands for the struct (un)packing helpers used by the binary v2
# merge-state format below.
_pack = struct.pack
_unpack = struct.unpack
45
45
46
46
47 def _droponode(data):
47 def _droponode(data):
48 # used for compatibility for v1
48 # used for compatibility for v1
49 bits = data.split(b'\0')
49 bits = data.split(b'\0')
50 bits = bits[:-2] + bits[-1:]
50 bits = bits[:-2] + bits[-1:]
51 return b'\0'.join(bits)
51 return b'\0'.join(bits)
52
52
53
53
# Merge state record types. See ``mergestate`` docs for more.
#
# Record types are single letters.  Per the ``mergestate`` docstring,
# uppercase record types are mandatory (versions of Mercurial that do not
# support them must abort) while lowercase ones may be safely ignored.
RECORD_LOCAL = b'L'
RECORD_OTHER = b'O'
RECORD_MERGED = b'F'
RECORD_CHANGEDELETE_CONFLICT = b'C'
RECORD_MERGE_DRIVER_MERGE = b'D'
RECORD_PATH_CONFLICT = b'P'
RECORD_MERGE_DRIVER_STATE = b'm'
RECORD_FILE_VALUES = b'f'
RECORD_LABELS = b'l'
# wraps another record to hide it from pre-3.7 clients (see _readrecordsv2)
RECORD_OVERRIDE = b't'
RECORD_UNSUPPORTED_MANDATORY = b'X'
RECORD_UNSUPPORTED_ADVISORY = b'x'

# Run states of the (experimental) merge driver; see ``mergestate`` docs.
MERGE_DRIVER_STATE_UNMARKED = b'u'
MERGE_DRIVER_STATE_MARKED = b'm'
MERGE_DRIVER_STATE_SUCCESS = b's'

# Per-file resolution states stored in ``mergestate._state``; see the
# ``mergestate`` docstring for the meaning of each value.
MERGE_RECORD_UNRESOLVED = b'u'
MERGE_RECORD_RESOLVED = b'r'
MERGE_RECORD_UNRESOLVED_PATH = b'pu'
MERGE_RECORD_RESOLVED_PATH = b'pr'
MERGE_RECORD_DRIVER_RESOLVED = b'd'

# Action codes used by the update/merge machinery to describe what to do
# with each file.
ACTION_FORGET = b'f'
ACTION_REMOVE = b'r'
ACTION_ADD = b'a'
ACTION_GET = b'g'
ACTION_PATH_CONFLICT = b'p'
ACTION_PATH_CONFLICT_RESOLVE = b'pr'
ACTION_ADD_MODIFIED = b'am'
ACTION_CREATED = b'c'
ACTION_DELETED_CHANGED = b'dc'
ACTION_CHANGED_DELETED = b'cd'
ACTION_MERGE = b'm'
ACTION_LOCAL_DIR_RENAME_GET = b'dg'
ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
ACTION_KEEP = b'k'
ACTION_EXEC = b'e'
ACTION_CREATED_MERGE = b'cm'
94
94
95
95
96 class mergestate(object):
96 class mergestate(object):
97 '''track 3-way merge state of individual files
97 '''track 3-way merge state of individual files
98
98
99 The merge state is stored on disk when needed. Two files are used: one with
99 The merge state is stored on disk when needed. Two files are used: one with
100 an old format (version 1), and one with a new format (version 2). Version 2
100 an old format (version 1), and one with a new format (version 2). Version 2
101 stores a superset of the data in version 1, including new kinds of records
101 stores a superset of the data in version 1, including new kinds of records
102 in the future. For more about the new format, see the documentation for
102 in the future. For more about the new format, see the documentation for
103 `_readrecordsv2`.
103 `_readrecordsv2`.
104
104
105 Each record can contain arbitrary content, and has an associated type. This
105 Each record can contain arbitrary content, and has an associated type. This
106 `type` should be a letter. If `type` is uppercase, the record is mandatory:
106 `type` should be a letter. If `type` is uppercase, the record is mandatory:
107 versions of Mercurial that don't support it should abort. If `type` is
107 versions of Mercurial that don't support it should abort. If `type` is
108 lowercase, the record can be safely ignored.
108 lowercase, the record can be safely ignored.
109
109
110 Currently known records:
110 Currently known records:
111
111
112 L: the node of the "local" part of the merge (hexified version)
112 L: the node of the "local" part of the merge (hexified version)
113 O: the node of the "other" part of the merge (hexified version)
113 O: the node of the "other" part of the merge (hexified version)
114 F: a file to be merged entry
114 F: a file to be merged entry
115 C: a change/delete or delete/change conflict
115 C: a change/delete or delete/change conflict
116 D: a file that the external merge driver will merge internally
116 D: a file that the external merge driver will merge internally
117 (experimental)
117 (experimental)
118 P: a path conflict (file vs directory)
118 P: a path conflict (file vs directory)
119 m: the external merge driver defined for this merge plus its run state
119 m: the external merge driver defined for this merge plus its run state
120 (experimental)
120 (experimental)
121 f: a (filename, dictionary) tuple of optional values for a given file
121 f: a (filename, dictionary) tuple of optional values for a given file
122 X: unsupported mandatory record type (used in tests)
122 X: unsupported mandatory record type (used in tests)
123 x: unsupported advisory record type (used in tests)
123 x: unsupported advisory record type (used in tests)
124 l: the labels for the parts of the merge.
124 l: the labels for the parts of the merge.
125
125
126 Merge driver run states (experimental):
126 Merge driver run states (experimental):
127 u: driver-resolved files unmarked -- needs to be run next time we're about
127 u: driver-resolved files unmarked -- needs to be run next time we're about
128 to resolve or commit
128 to resolve or commit
129 m: driver-resolved files marked -- only needs to be run before commit
129 m: driver-resolved files marked -- only needs to be run before commit
130 s: success/skipped -- does not need to be run any more
130 s: success/skipped -- does not need to be run any more
131
131
132 Merge record states (stored in self._state, indexed by filename):
132 Merge record states (stored in self._state, indexed by filename):
133 u: unresolved conflict
133 u: unresolved conflict
134 r: resolved conflict
134 r: resolved conflict
135 pu: unresolved path conflict (file conflicts with directory)
135 pu: unresolved path conflict (file conflicts with directory)
136 pr: resolved path conflict
136 pr: resolved path conflict
137 d: driver-resolved conflict
137 d: driver-resolved conflict
138
138
139 The resolve command transitions between 'u' and 'r' for conflicts and
139 The resolve command transitions between 'u' and 'r' for conflicts and
140 'pu' and 'pr' for path conflicts.
140 'pu' and 'pr' for path conflicts.
141 '''
141 '''
142
142
143 statepathv1 = b'merge/state'
143 statepathv1 = b'merge/state'
144 statepathv2 = b'merge/state2'
144 statepathv2 = b'merge/state2'
145
145
146 @staticmethod
146 @staticmethod
147 def clean(repo, node=None, other=None, labels=None):
147 def clean(repo, node=None, other=None, labels=None):
148 """Initialize a brand new merge state, removing any existing state on
148 """Initialize a brand new merge state, removing any existing state on
149 disk."""
149 disk."""
150 ms = mergestate(repo)
150 ms = mergestate(repo)
151 ms.reset(node, other, labels)
151 ms.reset(node, other, labels)
152 return ms
152 return ms
153
153
154 @staticmethod
154 @staticmethod
155 def read(repo):
155 def read(repo):
156 """Initialize the merge state, reading it from disk."""
156 """Initialize the merge state, reading it from disk."""
157 ms = mergestate(repo)
157 ms = mergestate(repo)
158 ms._read()
158 ms._read()
159 return ms
159 return ms
160
160
161 def __init__(self, repo):
161 def __init__(self, repo):
162 """Initialize the merge state.
162 """Initialize the merge state.
163
163
164 Do not use this directly! Instead call read() or clean()."""
164 Do not use this directly! Instead call read() or clean()."""
165 self._repo = repo
165 self._repo = repo
166 self._dirty = False
166 self._dirty = False
167 self._labels = None
167 self._labels = None
168
168
169 def reset(self, node=None, other=None, labels=None):
169 def reset(self, node=None, other=None, labels=None):
170 self._state = {}
170 self._state = {}
171 self._stateextras = {}
171 self._stateextras = {}
172 self._local = None
172 self._local = None
173 self._other = None
173 self._other = None
174 self._labels = labels
174 self._labels = labels
175 for var in ('localctx', 'otherctx'):
175 for var in ('localctx', 'otherctx'):
176 if var in vars(self):
176 if var in vars(self):
177 delattr(self, var)
177 delattr(self, var)
178 if node:
178 if node:
179 self._local = node
179 self._local = node
180 self._other = other
180 self._other = other
181 self._readmergedriver = None
181 self._readmergedriver = None
182 if self.mergedriver:
182 if self.mergedriver:
183 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
183 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
184 else:
184 else:
185 self._mdstate = MERGE_DRIVER_STATE_UNMARKED
185 self._mdstate = MERGE_DRIVER_STATE_UNMARKED
186 shutil.rmtree(self._repo.vfs.join(b'merge'), True)
186 shutil.rmtree(self._repo.vfs.join(b'merge'), True)
187 self._results = {}
187 self._results = {}
188 self._dirty = False
188 self._dirty = False
189
189
    def _read(self):
        """Analyse each record content to restore a serialized state from disk

        This function process "record" entry produced by the de-serialization
        of on disk file.
        """
        # start from a clean in-memory state before replaying the records
        self._state = {}
        self._stateextras = {}
        self._local = None
        self._other = None
        # drop cached localctx/otherctx properties, if computed
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        self._readmergedriver = None
        self._mdstate = MERGE_DRIVER_STATE_SUCCESS
        # mandatory record types we do not understand (uppercase type letters)
        unsupported = set()
        records = self._readrecords()
        for rtype, record in records:
            if rtype == RECORD_LOCAL:
                self._local = bin(record)
            elif rtype == RECORD_OTHER:
                self._other = bin(record)
            elif rtype == RECORD_MERGE_DRIVER_STATE:
                # record is b'<driver>\0<run-state>'
                bits = record.split(b'\0', 1)
                mdstate = bits[1]
                if len(mdstate) != 1 or mdstate not in (
                    MERGE_DRIVER_STATE_UNMARKED,
                    MERGE_DRIVER_STATE_MARKED,
                    MERGE_DRIVER_STATE_SUCCESS,
                ):
                    # the merge driver should be idempotent, so just rerun it
                    mdstate = MERGE_DRIVER_STATE_UNMARKED

                self._readmergedriver = bits[0]
                self._mdstate = mdstate
            elif rtype in (
                RECORD_MERGED,
                RECORD_CHANGEDELETE_CONFLICT,
                RECORD_PATH_CONFLICT,
                RECORD_MERGE_DRIVER_MERGE,
            ):
                # per-file records: b'<filename>\0<field>\0<field>...'
                bits = record.split(b'\0')
                self._state[bits[0]] = bits[1:]
            elif rtype == RECORD_FILE_VALUES:
                # record is b'<filename>\0<key>\0<value>\0<key>\0<value>...'
                filename, rawextras = record.split(b'\0', 1)
                extraparts = rawextras.split(b'\0')
                extras = {}
                i = 0
                # consume the flat key/value list pairwise
                while i < len(extraparts):
                    extras[extraparts[i]] = extraparts[i + 1]
                    i += 2

                self._stateextras[filename] = extras
            elif rtype == RECORD_LABELS:
                labels = record.split(b'\0', 2)
                self._labels = [l for l in labels if len(l) > 0]
            elif not rtype.islower():
                # uppercase means mandatory; collect it to raise below
                unsupported.add(rtype)
        self._results = {}
        self._dirty = False

        if unsupported:
            raise error.UnsupportedMergeRecords(unsupported)
253
253
254 def _readrecords(self):
254 def _readrecords(self):
255 """Read merge state from disk and return a list of record (TYPE, data)
255 """Read merge state from disk and return a list of record (TYPE, data)
256
256
257 We read data from both v1 and v2 files and decide which one to use.
257 We read data from both v1 and v2 files and decide which one to use.
258
258
259 V1 has been used by version prior to 2.9.1 and contains less data than
259 V1 has been used by version prior to 2.9.1 and contains less data than
260 v2. We read both versions and check if no data in v2 contradicts
260 v2. We read both versions and check if no data in v2 contradicts
261 v1. If there is not contradiction we can safely assume that both v1
261 v1. If there is not contradiction we can safely assume that both v1
262 and v2 were written at the same time and use the extract data in v2. If
262 and v2 were written at the same time and use the extract data in v2. If
263 there is contradiction we ignore v2 content as we assume an old version
263 there is contradiction we ignore v2 content as we assume an old version
264 of Mercurial has overwritten the mergestate file and left an old v2
264 of Mercurial has overwritten the mergestate file and left an old v2
265 file around.
265 file around.
266
266
267 returns list of record [(TYPE, data), ...]"""
267 returns list of record [(TYPE, data), ...]"""
268 v1records = self._readrecordsv1()
268 v1records = self._readrecordsv1()
269 v2records = self._readrecordsv2()
269 v2records = self._readrecordsv2()
270 if self._v1v2match(v1records, v2records):
270 if self._v1v2match(v1records, v2records):
271 return v2records
271 return v2records
272 else:
272 else:
273 # v1 file is newer than v2 file, use it
273 # v1 file is newer than v2 file, use it
274 # we have to infer the "other" changeset of the merge
274 # we have to infer the "other" changeset of the merge
275 # we cannot do better than that with v1 of the format
275 # we cannot do better than that with v1 of the format
276 mctx = self._repo[None].parents()[-1]
276 mctx = self._repo[None].parents()[-1]
277 v1records.append((RECORD_OTHER, mctx.hex()))
277 v1records.append((RECORD_OTHER, mctx.hex()))
278 # add place holder "other" file node information
278 # add place holder "other" file node information
279 # nobody is using it yet so we do no need to fetch the data
279 # nobody is using it yet so we do no need to fetch the data
280 # if mctx was wrong `mctx[bits[-2]]` may fails.
280 # if mctx was wrong `mctx[bits[-2]]` may fails.
281 for idx, r in enumerate(v1records):
281 for idx, r in enumerate(v1records):
282 if r[0] == RECORD_MERGED:
282 if r[0] == RECORD_MERGED:
283 bits = r[1].split(b'\0')
283 bits = r[1].split(b'\0')
284 bits.insert(-2, b'')
284 bits.insert(-2, b'')
285 v1records[idx] = (r[0], b'\0'.join(bits))
285 v1records[idx] = (r[0], b'\0'.join(bits))
286 return v1records
286 return v1records
287
287
288 def _v1v2match(self, v1records, v2records):
288 def _v1v2match(self, v1records, v2records):
289 oldv2 = set() # old format version of v2 record
289 oldv2 = set() # old format version of v2 record
290 for rec in v2records:
290 for rec in v2records:
291 if rec[0] == RECORD_LOCAL:
291 if rec[0] == RECORD_LOCAL:
292 oldv2.add(rec)
292 oldv2.add(rec)
293 elif rec[0] == RECORD_MERGED:
293 elif rec[0] == RECORD_MERGED:
294 # drop the onode data (not contained in v1)
294 # drop the onode data (not contained in v1)
295 oldv2.add((RECORD_MERGED, _droponode(rec[1])))
295 oldv2.add((RECORD_MERGED, _droponode(rec[1])))
296 for rec in v1records:
296 for rec in v1records:
297 if rec not in oldv2:
297 if rec not in oldv2:
298 return False
298 return False
299 else:
299 else:
300 return True
300 return True
301
301
302 def _readrecordsv1(self):
302 def _readrecordsv1(self):
303 """read on disk merge state for version 1 file
303 """read on disk merge state for version 1 file
304
304
305 returns list of record [(TYPE, data), ...]
305 returns list of record [(TYPE, data), ...]
306
306
307 Note: the "F" data from this file are one entry short
307 Note: the "F" data from this file are one entry short
308 (no "other file node" entry)
308 (no "other file node" entry)
309 """
309 """
310 records = []
310 records = []
311 try:
311 try:
312 f = self._repo.vfs(self.statepathv1)
312 f = self._repo.vfs(self.statepathv1)
313 for i, l in enumerate(f):
313 for i, l in enumerate(f):
314 if i == 0:
314 if i == 0:
315 records.append((RECORD_LOCAL, l[:-1]))
315 records.append((RECORD_LOCAL, l[:-1]))
316 else:
316 else:
317 records.append((RECORD_MERGED, l[:-1]))
317 records.append((RECORD_MERGED, l[:-1]))
318 f.close()
318 f.close()
319 except IOError as err:
319 except IOError as err:
320 if err.errno != errno.ENOENT:
320 if err.errno != errno.ENOENT:
321 raise
321 raise
322 return records
322 return records
323
323
324 def _readrecordsv2(self):
324 def _readrecordsv2(self):
325 """read on disk merge state for version 2 file
325 """read on disk merge state for version 2 file
326
326
327 This format is a list of arbitrary records of the form:
327 This format is a list of arbitrary records of the form:
328
328
329 [type][length][content]
329 [type][length][content]
330
330
331 `type` is a single character, `length` is a 4 byte integer, and
331 `type` is a single character, `length` is a 4 byte integer, and
332 `content` is an arbitrary byte sequence of length `length`.
332 `content` is an arbitrary byte sequence of length `length`.
333
333
334 Mercurial versions prior to 3.7 have a bug where if there are
334 Mercurial versions prior to 3.7 have a bug where if there are
335 unsupported mandatory merge records, attempting to clear out the merge
335 unsupported mandatory merge records, attempting to clear out the merge
336 state with hg update --clean or similar aborts. The 't' record type
336 state with hg update --clean or similar aborts. The 't' record type
337 works around that by writing out what those versions treat as an
337 works around that by writing out what those versions treat as an
338 advisory record, but later versions interpret as special: the first
338 advisory record, but later versions interpret as special: the first
339 character is the 'real' record type and everything onwards is the data.
339 character is the 'real' record type and everything onwards is the data.
340
340
341 Returns list of records [(TYPE, data), ...]."""
341 Returns list of records [(TYPE, data), ...]."""
342 records = []
342 records = []
343 try:
343 try:
344 f = self._repo.vfs(self.statepathv2)
344 f = self._repo.vfs(self.statepathv2)
345 data = f.read()
345 data = f.read()
346 off = 0
346 off = 0
347 end = len(data)
347 end = len(data)
348 while off < end:
348 while off < end:
349 rtype = data[off : off + 1]
349 rtype = data[off : off + 1]
350 off += 1
350 off += 1
351 length = _unpack(b'>I', data[off : (off + 4)])[0]
351 length = _unpack(b'>I', data[off : (off + 4)])[0]
352 off += 4
352 off += 4
353 record = data[off : (off + length)]
353 record = data[off : (off + length)]
354 off += length
354 off += length
355 if rtype == RECORD_OVERRIDE:
355 if rtype == RECORD_OVERRIDE:
356 rtype, record = record[0:1], record[1:]
356 rtype, record = record[0:1], record[1:]
357 records.append((rtype, record))
357 records.append((rtype, record))
358 f.close()
358 f.close()
359 except IOError as err:
359 except IOError as err:
360 if err.errno != errno.ENOENT:
360 if err.errno != errno.ENOENT:
361 raise
361 raise
362 return records
362 return records
363
363
364 @util.propertycache
364 @util.propertycache
365 def mergedriver(self):
365 def mergedriver(self):
366 # protect against the following:
366 # protect against the following:
367 # - A configures a malicious merge driver in their hgrc, then
367 # - A configures a malicious merge driver in their hgrc, then
368 # pauses the merge
368 # pauses the merge
369 # - A edits their hgrc to remove references to the merge driver
369 # - A edits their hgrc to remove references to the merge driver
370 # - A gives a copy of their entire repo, including .hg, to B
370 # - A gives a copy of their entire repo, including .hg, to B
371 # - B inspects .hgrc and finds it to be clean
371 # - B inspects .hgrc and finds it to be clean
372 # - B then continues the merge and the malicious merge driver
372 # - B then continues the merge and the malicious merge driver
373 # gets invoked
373 # gets invoked
374 configmergedriver = self._repo.ui.config(
374 configmergedriver = self._repo.ui.config(
375 b'experimental', b'mergedriver'
375 b'experimental', b'mergedriver'
376 )
376 )
377 if (
377 if (
378 self._readmergedriver is not None
378 self._readmergedriver is not None
379 and self._readmergedriver != configmergedriver
379 and self._readmergedriver != configmergedriver
380 ):
380 ):
381 raise error.ConfigError(
381 raise error.ConfigError(
382 _(b"merge driver changed since merge started"),
382 _(b"merge driver changed since merge started"),
383 hint=_(b"revert merge driver change or abort merge"),
383 hint=_(b"revert merge driver change or abort merge"),
384 )
384 )
385
385
386 return configmergedriver
386 return configmergedriver
387
387
388 @util.propertycache
388 @util.propertycache
389 def localctx(self):
389 def localctx(self):
390 if self._local is None:
390 if self._local is None:
391 msg = b"localctx accessed but self._local isn't set"
391 msg = b"localctx accessed but self._local isn't set"
392 raise error.ProgrammingError(msg)
392 raise error.ProgrammingError(msg)
393 return self._repo[self._local]
393 return self._repo[self._local]
394
394
395 @util.propertycache
395 @util.propertycache
396 def otherctx(self):
396 def otherctx(self):
397 if self._other is None:
397 if self._other is None:
398 msg = b"otherctx accessed but self._other isn't set"
398 msg = b"otherctx accessed but self._other isn't set"
399 raise error.ProgrammingError(msg)
399 raise error.ProgrammingError(msg)
400 return self._repo[self._other]
400 return self._repo[self._other]
401
401
402 def active(self):
402 def active(self):
403 """Whether mergestate is active.
403 """Whether mergestate is active.
404
404
405 Returns True if there appears to be mergestate. This is a rough proxy
405 Returns True if there appears to be mergestate. This is a rough proxy
406 for "is a merge in progress."
406 for "is a merge in progress."
407 """
407 """
408 # Check local variables before looking at filesystem for performance
408 # Check local variables before looking at filesystem for performance
409 # reasons.
409 # reasons.
410 return (
410 return (
411 bool(self._local)
411 bool(self._local)
412 or bool(self._state)
412 or bool(self._state)
413 or self._repo.vfs.exists(self.statepathv1)
413 or self._repo.vfs.exists(self.statepathv1)
414 or self._repo.vfs.exists(self.statepathv2)
414 or self._repo.vfs.exists(self.statepathv2)
415 )
415 )
416
416
417 def commit(self):
417 def commit(self):
418 """Write current state on disk (if necessary)"""
418 """Write current state on disk (if necessary)"""
419 if self._dirty:
419 if self._dirty:
420 records = self._makerecords()
420 records = self._makerecords()
421 self._writerecords(records)
421 self._writerecords(records)
422 self._dirty = False
422 self._dirty = False
423
423
    def _makerecords(self):
        """Build the list of (TYPE, data) records describing this merge.

        The list starts with the local and other nodes, optionally the
        merge driver state, then one record per file in ``_state``, the
        per-file extras, and finally the conflict-marker labels (if any).
        """
        records = []
        records.append((RECORD_LOCAL, hex(self._local)))
        records.append((RECORD_OTHER, hex(self._other)))
        if self.mergedriver:
            records.append(
                (
                    RECORD_MERGE_DRIVER_STATE,
                    b'\0'.join([self.mergedriver, self._mdstate]),
                )
            )
        # Write out state items. In all cases, the value of the state map entry
        # is written as the contents of the record. The record type depends on
        # the type of state that is stored, and capital-letter records are used
        # to prevent older versions of Mercurial that do not support the feature
        # from loading them.
        for filename, v in pycompat.iteritems(self._state):
            if v[0] == MERGE_RECORD_DRIVER_RESOLVED:
                # Driver-resolved merge. These are stored in 'D' records.
                records.append(
                    (RECORD_MERGE_DRIVER_MERGE, b'\0'.join([filename] + v))
                )
            elif v[0] in (
                MERGE_RECORD_UNRESOLVED_PATH,
                MERGE_RECORD_RESOLVED_PATH,
            ):
                # Path conflicts. These are stored in 'P' records. The current
                # resolution state ('pu' or 'pr') is stored within the record.
                records.append(
                    (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
                )
            elif v[1] == nullhex or v[6] == nullhex:
                # Change/Delete or Delete/Change conflicts. These are stored in
                # 'C' records. v[1] is the local file, and is nullhex when the
                # file is deleted locally ('dc'). v[6] is the remote file, and
                # is nullhex when the file is deleted remotely ('cd').
                records.append(
                    (RECORD_CHANGEDELETE_CONFLICT, b'\0'.join([filename] + v))
                )
            else:
                # Normal files. These are stored in 'F' records.
                records.append((RECORD_MERGED, b'\0'.join([filename] + v)))
        for filename, extras in sorted(pycompat.iteritems(self._stateextras)):
            rawextras = b'\0'.join(
                b'%s\0%s' % (k, v) for k, v in pycompat.iteritems(extras)
            )
            records.append(
                (RECORD_FILE_VALUES, b'%s\0%s' % (filename, rawextras))
            )
        if self._labels is not None:
            labels = b'\0'.join(self._labels)
            records.append((RECORD_LABELS, labels))
        return records
477
477
478 def _writerecords(self, records):
478 def _writerecords(self, records):
479 """Write current state on disk (both v1 and v2)"""
479 """Write current state on disk (both v1 and v2)"""
480 self._writerecordsv1(records)
480 self._writerecordsv1(records)
481 self._writerecordsv2(records)
481 self._writerecordsv2(records)
482
482
483 def _writerecordsv1(self, records):
483 def _writerecordsv1(self, records):
484 """Write current state on disk in a version 1 file"""
484 """Write current state on disk in a version 1 file"""
485 f = self._repo.vfs(self.statepathv1, b'wb')
485 f = self._repo.vfs(self.statepathv1, b'wb')
486 irecords = iter(records)
486 irecords = iter(records)
487 lrecords = next(irecords)
487 lrecords = next(irecords)
488 assert lrecords[0] == RECORD_LOCAL
488 assert lrecords[0] == RECORD_LOCAL
489 f.write(hex(self._local) + b'\n')
489 f.write(hex(self._local) + b'\n')
490 for rtype, data in irecords:
490 for rtype, data in irecords:
491 if rtype == RECORD_MERGED:
491 if rtype == RECORD_MERGED:
492 f.write(b'%s\n' % _droponode(data))
492 f.write(b'%s\n' % _droponode(data))
493 f.close()
493 f.close()
494
494
495 def _writerecordsv2(self, records):
495 def _writerecordsv2(self, records):
496 """Write current state on disk in a version 2 file
496 """Write current state on disk in a version 2 file
497
497
498 See the docstring for _readrecordsv2 for why we use 't'."""
498 See the docstring for _readrecordsv2 for why we use 't'."""
499 # these are the records that all version 2 clients can read
499 # these are the records that all version 2 clients can read
500 allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
500 allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
501 f = self._repo.vfs(self.statepathv2, b'wb')
501 f = self._repo.vfs(self.statepathv2, b'wb')
502 for key, data in records:
502 for key, data in records:
503 assert len(key) == 1
503 assert len(key) == 1
504 if key not in allowlist:
504 if key not in allowlist:
505 key, data = RECORD_OVERRIDE, b'%s%s' % (key, data)
505 key, data = RECORD_OVERRIDE, b'%s%s' % (key, data)
506 format = b'>sI%is' % len(data)
506 format = b'>sI%is' % len(data)
507 f.write(_pack(format, key, len(data), data))
507 f.write(_pack(format, key, len(data), data))
508 f.close()
508 f.close()
509
509
510 @staticmethod
510 @staticmethod
511 def getlocalkey(path):
511 def getlocalkey(path):
512 """hash the path of a local file context for storage in the .hg/merge
512 """hash the path of a local file context for storage in the .hg/merge
513 directory."""
513 directory."""
514
514
515 return hex(hashutil.sha1(path).digest())
515 return hex(hashutil.sha1(path).digest())
516
516
    def add(self, fcl, fco, fca, fd):
        """add a new (potentially?) conflicting file the merge state
        fcl: file context for local,
        fco: file context for remote,
        fca: file context for ancestors,
        fd: file path of the resulting merge.

        note: also write the local version to the `.hg/merge` directory.
        """
        if fcl.isabsent():
            # No local version to snapshot; record the null key instead.
            localkey = nullhex
        else:
            localkey = mergestate.getlocalkey(fcl.path())
            # Snapshot the local file contents so they can be restored when
            # the merge is re-run (see _resolve's "restore local" step).
            self._repo.vfs.write(b'merge/' + localkey, fcl.data())
        # NOTE: this entry layout must stay in sync with the unpacking in
        # _resolve(): state, localkey, lfile, afile, anode, ofile, onode,
        # flags.
        self._state[fd] = [
            MERGE_RECORD_UNRESOLVED,
            localkey,
            fcl.path(),
            fca.path(),
            hex(fca.filenode()),
            fco.path(),
            hex(fco.filenode()),
            fcl.flags(),
        ]
        self._stateextras[fd] = {b'ancestorlinknode': hex(fca.node())}
        self._dirty = True
543
543
544 def addpath(self, path, frename, forigin):
544 def addpath(self, path, frename, forigin):
545 """add a new conflicting path to the merge state
545 """add a new conflicting path to the merge state
546 path: the path that conflicts
546 path: the path that conflicts
547 frename: the filename the conflicting file was renamed to
547 frename: the filename the conflicting file was renamed to
548 forigin: origin of the file ('l' or 'r' for local/remote)
548 forigin: origin of the file ('l' or 'r' for local/remote)
549 """
549 """
550 self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
550 self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
551 self._dirty = True
551 self._dirty = True
552
552
    def __contains__(self, dfile):
        # True iff ``dfile`` has an entry in this merge state.
        return dfile in self._state
555
555
556 def __getitem__(self, dfile):
556 def __getitem__(self, dfile):
557 return self._state[dfile][0]
557 return self._state[dfile][0]
558
558
559 def __iter__(self):
559 def __iter__(self):
560 return iter(sorted(self._state))
560 return iter(sorted(self._state))
561
561
    def files(self):
        # Return a view of all file paths tracked in this merge state.
        return self._state.keys()
564
564
565 def mark(self, dfile, state):
565 def mark(self, dfile, state):
566 self._state[dfile][0] = state
566 self._state[dfile][0] = state
567 self._dirty = True
567 self._dirty = True
568
568
    def mdstate(self):
        # Return the raw merge driver state value.
        return self._mdstate
571
571
572 def unresolved(self):
572 def unresolved(self):
573 """Obtain the paths of unresolved files."""
573 """Obtain the paths of unresolved files."""
574
574
575 for f, entry in pycompat.iteritems(self._state):
575 for f, entry in pycompat.iteritems(self._state):
576 if entry[0] in (
576 if entry[0] in (
577 MERGE_RECORD_UNRESOLVED,
577 MERGE_RECORD_UNRESOLVED,
578 MERGE_RECORD_UNRESOLVED_PATH,
578 MERGE_RECORD_UNRESOLVED_PATH,
579 ):
579 ):
580 yield f
580 yield f
581
581
582 def driverresolved(self):
582 def driverresolved(self):
583 """Obtain the paths of driver-resolved files."""
583 """Obtain the paths of driver-resolved files."""
584
584
585 for f, entry in self._state.items():
585 for f, entry in self._state.items():
586 if entry[0] == MERGE_RECORD_DRIVER_RESOLVED:
586 if entry[0] == MERGE_RECORD_DRIVER_RESOLVED:
587 yield f
587 yield f
588
588
589 def extras(self, filename):
589 def extras(self, filename):
590 return self._stateextras.setdefault(filename, {})
590 return self._stateextras.setdefault(filename, {})
591
591
    def _resolve(self, preresolve, dfile, wctx):
        """rerun merge process for file path `dfile`

        preresolve: run the premerge step when True, the real file merge
        when False.
        dfile: path of the file being merged
        wctx: working context the result is written into

        Returns (complete, r): whether the merge is finished, and the
        merge exit code (0 means resolved).
        """
        # Already resolved (by user or by merge driver): nothing to do.
        if self[dfile] in (MERGE_RECORD_RESOLVED, MERGE_RECORD_DRIVER_RESOLVED):
            return True, 0
        stateentry = self._state[dfile]
        # Entry layout must match what add() stored.
        state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
        octx = self._repo[self._other]
        extras = self.extras(dfile)
        anccommitnode = extras.get(b'ancestorlinknode')
        if anccommitnode:
            actx = self._repo[anccommitnode]
        else:
            actx = None
        fcd = self._filectxorabsent(localkey, wctx, dfile)
        fco = self._filectxorabsent(onode, octx, ofile)
        # TODO: move this to filectxorabsent
        fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
        # "premerge" x flags
        flo = fco.flags()
        fla = fca.flags()
        # Only attempt flag merging when an 'x' flag is involved and no
        # 'l' flag is present on any side.
        if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
            if fca.node() == nullid and flags != flo:
                if preresolve:
                    self._repo.ui.warn(
                        _(
                            b'warning: cannot merge flags for %s '
                            b'without common ancestor - keeping local flags\n'
                        )
                        % afile
                    )
                elif flags == fla:
                    flags = flo
        if preresolve:
            # restore local
            if localkey != nullhex:
                # Rewrite the working copy from the snapshot taken by add().
                f = self._repo.vfs(b'merge/' + localkey)
                wctx[dfile].write(f.read(), flags)
                f.close()
            else:
                wctx[dfile].remove(ignoremissing=True)
            complete, r, deleted = filemerge.premerge(
                self._repo,
                wctx,
                self._local,
                lfile,
                fcd,
                fco,
                fca,
                labels=self._labels,
            )
        else:
            complete, r, deleted = filemerge.filemerge(
                self._repo,
                wctx,
                self._local,
                lfile,
                fcd,
                fco,
                fca,
                labels=self._labels,
            )
        if r is None:
            # no real conflict
            del self._state[dfile]
            self._stateextras.pop(dfile, None)
            self._dirty = True
        elif not r:
            self.mark(dfile, MERGE_RECORD_RESOLVED)

        if complete:
            # Record the dirstate action implied by the merge outcome.
            action = None
            if deleted:
                if fcd.isabsent():
                    # dc: local picked. Need to drop if present, which may
                    # happen on re-resolves.
                    action = ACTION_FORGET
                else:
                    # cd: remote picked (or otherwise deleted)
                    action = ACTION_REMOVE
            else:
                if fcd.isabsent():  # dc: remote picked
                    action = ACTION_GET
                elif fco.isabsent():  # cd: local picked
                    if dfile in self.localctx:
                        action = ACTION_ADD_MODIFIED
                    else:
                        action = ACTION_ADD
                # else: regular merges (no action necessary)
            self._results[dfile] = r, action

        return complete, r
683
683
684 def _filectxorabsent(self, hexnode, ctx, f):
684 def _filectxorabsent(self, hexnode, ctx, f):
685 if hexnode == nullhex:
685 if hexnode == nullhex:
686 return filemerge.absentfilectx(ctx, f)
686 return filemerge.absentfilectx(ctx, f)
687 else:
687 else:
688 return ctx[f]
688 return ctx[f]
689
689
690 def preresolve(self, dfile, wctx):
690 def preresolve(self, dfile, wctx):
691 """run premerge process for dfile
691 """run premerge process for dfile
692
692
693 Returns whether the merge is complete, and the exit code."""
693 Returns whether the merge is complete, and the exit code."""
694 return self._resolve(True, dfile, wctx)
694 return self._resolve(True, dfile, wctx)
695
695
696 def resolve(self, dfile, wctx):
696 def resolve(self, dfile, wctx):
697 """run merge process (assuming premerge was run) for dfile
697 """run merge process (assuming premerge was run) for dfile
698
698
699 Returns the exit code of the merge."""
699 Returns the exit code of the merge."""
700 return self._resolve(False, dfile, wctx)[1]
700 return self._resolve(False, dfile, wctx)[1]
701
701
702 def counts(self):
702 def counts(self):
703 """return counts for updated, merged and removed files in this
703 """return counts for updated, merged and removed files in this
704 session"""
704 session"""
705 updated, merged, removed = 0, 0, 0
705 updated, merged, removed = 0, 0, 0
706 for r, action in pycompat.itervalues(self._results):
706 for r, action in pycompat.itervalues(self._results):
707 if r is None:
707 if r is None:
708 updated += 1
708 updated += 1
709 elif r == 0:
709 elif r == 0:
710 if action == ACTION_REMOVE:
710 if action == ACTION_REMOVE:
711 removed += 1
711 removed += 1
712 else:
712 else:
713 merged += 1
713 merged += 1
714 return updated, merged, removed
714 return updated, merged, removed
715
715
716 def unresolvedcount(self):
716 def unresolvedcount(self):
717 """get unresolved count for this merge (persistent)"""
717 """get unresolved count for this merge (persistent)"""
718 return len(list(self.unresolved()))
718 return len(list(self.unresolved()))
719
719
720 def actions(self):
720 def actions(self):
721 """return lists of actions to perform on the dirstate"""
721 """return lists of actions to perform on the dirstate"""
722 actions = {
722 actions = {
723 ACTION_REMOVE: [],
723 ACTION_REMOVE: [],
724 ACTION_FORGET: [],
724 ACTION_FORGET: [],
725 ACTION_ADD: [],
725 ACTION_ADD: [],
726 ACTION_ADD_MODIFIED: [],
726 ACTION_ADD_MODIFIED: [],
727 ACTION_GET: [],
727 ACTION_GET: [],
728 }
728 }
729 for f, (r, action) in pycompat.iteritems(self._results):
729 for f, (r, action) in pycompat.iteritems(self._results):
730 if action is not None:
730 if action is not None:
731 actions[action].append((f, None, b"merge result"))
731 actions[action].append((f, None, b"merge result"))
732 return actions
732 return actions
733
733
734 def recordactions(self):
734 def recordactions(self):
735 """record remove/add/get actions in the dirstate"""
735 """record remove/add/get actions in the dirstate"""
736 branchmerge = self._repo.dirstate.p2() != nullid
736 branchmerge = self._repo.dirstate.p2() != nullid
737 recordupdates(self._repo, self.actions(), branchmerge, None)
737 recordupdates(self._repo, self.actions(), branchmerge, None)
738
738
739 def queueremove(self, f):
739 def queueremove(self, f):
740 """queues a file to be removed from the dirstate
740 """queues a file to be removed from the dirstate
741
741
742 Meant for use by custom merge drivers."""
742 Meant for use by custom merge drivers."""
743 self._results[f] = 0, ACTION_REMOVE
743 self._results[f] = 0, ACTION_REMOVE
744
744
745 def queueadd(self, f):
745 def queueadd(self, f):
746 """queues a file to be added to the dirstate
746 """queues a file to be added to the dirstate
747
747
748 Meant for use by custom merge drivers."""
748 Meant for use by custom merge drivers."""
749 self._results[f] = 0, ACTION_ADD
749 self._results[f] = 0, ACTION_ADD
750
750
751 def queueget(self, f):
751 def queueget(self, f):
752 """queues a file to be marked modified in the dirstate
752 """queues a file to be marked modified in the dirstate
753
753
754 Meant for use by custom merge drivers."""
754 Meant for use by custom merge drivers."""
755 self._results[f] = 0, ACTION_GET
755 self._results[f] = 0, ACTION_GET
756
756
757
757
def _getcheckunknownconfig(repo, section, name):
    """Read ``section.name`` and require one of abort/ignore/warn."""
    value = repo.ui.config(section, name)
    allowed = [b'abort', b'ignore', b'warn']
    if value in allowed:
        return value
    quoted = b', '.join([b"'" + choice + b"'" for choice in allowed])
    raise error.ConfigError(
        _(b"%s.%s not valid ('%s' is none of %s)")
        % (section, name, value, quoted)
    )
768
768
769
769
def _checkunknownfile(repo, wctx, mctx, f, f2=None):
    """Check whether working-copy path ``f`` is an untracked file that
    differs from ``mctx[f2]`` (``f2`` defaults to ``f``)."""
    if wctx.isinmemory():
        # Nothing to do in IMM because nothing in the "working copy" can be an
        # unknown file.
        #
        # Note that we should bail out here, not in ``_checkunknownfiles()``,
        # because that function does other useful work.
        return False

    if f2 is None:
        f2 = f
    wvfs = repo.wvfs
    dirstate = repo.dirstate
    return (
        wvfs.audit.check(f)
        and wvfs.isfileorlink(f)
        and dirstate.normalize(f) not in dirstate
        and mctx[f2].cmp(wctx[f])
    )
787
787
788
788
class _unknowndirschecker(object):
    """
    Look for any unknown files or directories that may have a path conflict
    with a file. If any path prefix of the file exists as a file or link,
    then it conflicts. If the file itself is a directory that contains any
    file that is not tracked, then it conflicts.

    Returns the shortest path at which a conflict occurs, or None if there is
    no conflict.
    """

    def __init__(self):
        # A set of paths known to be good. This prevents repeated checking of
        # dirs. It will be updated with any new dirs that are checked and found
        # to be safe.
        self._unknowndircache = set()

        # A set of paths that are known to be absent. This prevents repeated
        # checking of subdirectories that are known not to exist. It will be
        # updated with any new dirs that are checked and found to be absent.
        self._missingdircache = set()

    def __call__(self, repo, wctx, f):
        """Return the shortest conflicting path for ``f``, or a falsy value
        (False/None) when there is no conflict."""
        if wctx.isinmemory():
            # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
            return False

        # Check for path prefixes that exist as unknown files.
        for p in reversed(list(pathutil.finddirs(f))):
            if p in self._missingdircache:
                # Known-absent prefix: nothing below it can conflict.
                return
            if p in self._unknowndircache:
                # Already verified safe; check the next (longer) prefix.
                continue
            if repo.wvfs.audit.check(p):
                if (
                    repo.wvfs.isfileorlink(p)
                    and repo.dirstate.normalize(p) not in repo.dirstate
                ):
                    # Prefix exists as an untracked file/link: conflict.
                    return p
                if not repo.wvfs.lexists(p):
                    # Prefix missing on disk: remember and stop early.
                    self._missingdircache.add(p)
                    return
                self._unknowndircache.add(p)

        # Check if the file conflicts with a directory containing unknown files.
        if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
            # Does the directory contain any files that are not in the dirstate?
            for p, dirs, files in repo.wvfs.walk(f):
                for fn in files:
                    relf = util.pconvert(repo.wvfs.reljoin(p, fn))
                    relf = repo.dirstate.normalize(relf, isknown=True)
                    if relf not in repo.dirstate:
                        return f
        return None
843
843
844
844
845 def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
845 def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
846 """
846 """
847 Considers any actions that care about the presence of conflicting unknown
847 Considers any actions that care about the presence of conflicting unknown
848 files. For some actions, the result is to abort; for others, it is to
848 files. For some actions, the result is to abort; for others, it is to
849 choose a different action.
849 choose a different action.
850 """
850 """
851 fileconflicts = set()
851 fileconflicts = set()
852 pathconflicts = set()
852 pathconflicts = set()
853 warnconflicts = set()
853 warnconflicts = set()
854 abortconflicts = set()
854 abortconflicts = set()
855 unknownconfig = _getcheckunknownconfig(repo, b'merge', b'checkunknown')
855 unknownconfig = _getcheckunknownconfig(repo, b'merge', b'checkunknown')
856 ignoredconfig = _getcheckunknownconfig(repo, b'merge', b'checkignored')
856 ignoredconfig = _getcheckunknownconfig(repo, b'merge', b'checkignored')
857 pathconfig = repo.ui.configbool(
857 pathconfig = repo.ui.configbool(
858 b'experimental', b'merge.checkpathconflicts'
858 b'experimental', b'merge.checkpathconflicts'
859 )
859 )
860 if not force:
860 if not force:
861
861
862 def collectconflicts(conflicts, config):
862 def collectconflicts(conflicts, config):
863 if config == b'abort':
863 if config == b'abort':
864 abortconflicts.update(conflicts)
864 abortconflicts.update(conflicts)
865 elif config == b'warn':
865 elif config == b'warn':
866 warnconflicts.update(conflicts)
866 warnconflicts.update(conflicts)
867
867
868 checkunknowndirs = _unknowndirschecker()
868 checkunknowndirs = _unknowndirschecker()
869 for f, (m, args, msg) in pycompat.iteritems(actions):
869 for f, (m, args, msg) in pycompat.iteritems(actions):
870 if m in (ACTION_CREATED, ACTION_DELETED_CHANGED):
870 if m in (ACTION_CREATED, ACTION_DELETED_CHANGED):
871 if _checkunknownfile(repo, wctx, mctx, f):
871 if _checkunknownfile(repo, wctx, mctx, f):
872 fileconflicts.add(f)
872 fileconflicts.add(f)
873 elif pathconfig and f not in wctx:
873 elif pathconfig and f not in wctx:
874 path = checkunknowndirs(repo, wctx, f)
874 path = checkunknowndirs(repo, wctx, f)
875 if path is not None:
875 if path is not None:
876 pathconflicts.add(path)
876 pathconflicts.add(path)
877 elif m == ACTION_LOCAL_DIR_RENAME_GET:
877 elif m == ACTION_LOCAL_DIR_RENAME_GET:
878 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
878 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
879 fileconflicts.add(f)
879 fileconflicts.add(f)
880
880
881 allconflicts = fileconflicts | pathconflicts
881 allconflicts = fileconflicts | pathconflicts
882 ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)}
882 ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)}
883 unknownconflicts = allconflicts - ignoredconflicts
883 unknownconflicts = allconflicts - ignoredconflicts
884 collectconflicts(ignoredconflicts, ignoredconfig)
884 collectconflicts(ignoredconflicts, ignoredconfig)
885 collectconflicts(unknownconflicts, unknownconfig)
885 collectconflicts(unknownconflicts, unknownconfig)
886 else:
886 else:
887 for f, (m, args, msg) in pycompat.iteritems(actions):
887 for f, (m, args, msg) in pycompat.iteritems(actions):
888 if m == ACTION_CREATED_MERGE:
888 if m == ACTION_CREATED_MERGE:
889 fl2, anc = args
889 fl2, anc = args
890 different = _checkunknownfile(repo, wctx, mctx, f)
890 different = _checkunknownfile(repo, wctx, mctx, f)
891 if repo.dirstate._ignore(f):
891 if repo.dirstate._ignore(f):
892 config = ignoredconfig
892 config = ignoredconfig
893 else:
893 else:
894 config = unknownconfig
894 config = unknownconfig
895
895
896 # The behavior when force is True is described by this table:
896 # The behavior when force is True is described by this table:
897 # config different mergeforce | action backup
897 # config different mergeforce | action backup
898 # * n * | get n
898 # * n * | get n
899 # * y y | merge -
899 # * y y | merge -
900 # abort y n | merge - (1)
900 # abort y n | merge - (1)
901 # warn y n | warn + get y
901 # warn y n | warn + get y
902 # ignore y n | get y
902 # ignore y n | get y
903 #
903 #
904 # (1) this is probably the wrong behavior here -- we should
904 # (1) this is probably the wrong behavior here -- we should
905 # probably abort, but some actions like rebases currently
905 # probably abort, but some actions like rebases currently
906 # don't like an abort happening in the middle of
906 # don't like an abort happening in the middle of
907 # merge.update.
907 # merge.update.
908 if not different:
908 if not different:
909 actions[f] = (ACTION_GET, (fl2, False), b'remote created')
909 actions[f] = (ACTION_GET, (fl2, False), b'remote created')
910 elif mergeforce or config == b'abort':
910 elif mergeforce or config == b'abort':
911 actions[f] = (
911 actions[f] = (
912 ACTION_MERGE,
912 ACTION_MERGE,
913 (f, f, None, False, anc),
913 (f, f, None, False, anc),
914 b'remote differs from untracked local',
914 b'remote differs from untracked local',
915 )
915 )
916 elif config == b'abort':
916 elif config == b'abort':
917 abortconflicts.add(f)
917 abortconflicts.add(f)
918 else:
918 else:
919 if config == b'warn':
919 if config == b'warn':
920 warnconflicts.add(f)
920 warnconflicts.add(f)
921 actions[f] = (ACTION_GET, (fl2, True), b'remote created')
921 actions[f] = (ACTION_GET, (fl2, True), b'remote created')
922
922
923 for f in sorted(abortconflicts):
923 for f in sorted(abortconflicts):
924 warn = repo.ui.warn
924 warn = repo.ui.warn
925 if f in pathconflicts:
925 if f in pathconflicts:
926 if repo.wvfs.isfileorlink(f):
926 if repo.wvfs.isfileorlink(f):
927 warn(_(b"%s: untracked file conflicts with directory\n") % f)
927 warn(_(b"%s: untracked file conflicts with directory\n") % f)
928 else:
928 else:
929 warn(_(b"%s: untracked directory conflicts with file\n") % f)
929 warn(_(b"%s: untracked directory conflicts with file\n") % f)
930 else:
930 else:
931 warn(_(b"%s: untracked file differs\n") % f)
931 warn(_(b"%s: untracked file differs\n") % f)
932 if abortconflicts:
932 if abortconflicts:
933 raise error.Abort(
933 raise error.Abort(
934 _(
934 _(
935 b"untracked files in working directory "
935 b"untracked files in working directory "
936 b"differ from files in requested revision"
936 b"differ from files in requested revision"
937 )
937 )
938 )
938 )
939
939
940 for f in sorted(warnconflicts):
940 for f in sorted(warnconflicts):
941 if repo.wvfs.isfileorlink(f):
941 if repo.wvfs.isfileorlink(f):
942 repo.ui.warn(_(b"%s: replacing untracked file\n") % f)
942 repo.ui.warn(_(b"%s: replacing untracked file\n") % f)
943 else:
943 else:
944 repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)
944 repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)
945
945
946 for f, (m, args, msg) in pycompat.iteritems(actions):
946 for f, (m, args, msg) in pycompat.iteritems(actions):
947 if m == ACTION_CREATED:
947 if m == ACTION_CREATED:
948 backup = (
948 backup = (
949 f in fileconflicts
949 f in fileconflicts
950 or f in pathconflicts
950 or f in pathconflicts
951 or any(p in pathconflicts for p in pathutil.finddirs(f))
951 or any(p in pathconflicts for p in pathutil.finddirs(f))
952 )
952 )
953 (flags,) = args
953 (flags,) = args
954 actions[f] = (ACTION_GET, (flags, backup), msg)
954 actions[f] = (ACTION_GET, (flags, backup), msg)
955
955
956
956
957 def _forgetremoved(wctx, mctx, branchmerge):
957 def _forgetremoved(wctx, mctx, branchmerge):
958 """
958 """
959 Forget removed files
959 Forget removed files
960
960
961 If we're jumping between revisions (as opposed to merging), and if
961 If we're jumping between revisions (as opposed to merging), and if
962 neither the working directory nor the target rev has the file,
962 neither the working directory nor the target rev has the file,
963 then we need to remove it from the dirstate, to prevent the
963 then we need to remove it from the dirstate, to prevent the
964 dirstate from listing the file when it is no longer in the
964 dirstate from listing the file when it is no longer in the
965 manifest.
965 manifest.
966
966
967 If we're merging, and the other revision has removed a file
967 If we're merging, and the other revision has removed a file
968 that is not present in the working directory, we need to mark it
968 that is not present in the working directory, we need to mark it
969 as removed.
969 as removed.
970 """
970 """
971
971
972 actions = {}
972 actions = {}
973 m = ACTION_FORGET
973 m = ACTION_FORGET
974 if branchmerge:
974 if branchmerge:
975 m = ACTION_REMOVE
975 m = ACTION_REMOVE
976 for f in wctx.deleted():
976 for f in wctx.deleted():
977 if f not in mctx:
977 if f not in mctx:
978 actions[f] = m, None, b"forget deleted"
978 actions[f] = m, None, b"forget deleted"
979
979
980 if not branchmerge:
980 if not branchmerge:
981 for f in wctx.removed():
981 for f in wctx.removed():
982 if f not in mctx:
982 if f not in mctx:
983 actions[f] = ACTION_FORGET, None, b"forget removed"
983 actions[f] = ACTION_FORGET, None, b"forget removed"
984
984
985 return actions
985 return actions
986
986
987
987
988 def _checkcollision(repo, wmf, actions):
988 def _checkcollision(repo, wmf, actions):
989 """
989 """
990 Check for case-folding collisions.
990 Check for case-folding collisions.
991 """
991 """
992
992
993 # If the repo is narrowed, filter out files outside the narrowspec.
993 # If the repo is narrowed, filter out files outside the narrowspec.
994 narrowmatch = repo.narrowmatch()
994 narrowmatch = repo.narrowmatch()
995 if not narrowmatch.always():
995 if not narrowmatch.always():
996 wmf = wmf.matches(narrowmatch)
996 wmf = wmf.matches(narrowmatch)
997 if actions:
997 if actions:
998 narrowactions = {}
998 narrowactions = {}
999 for m, actionsfortype in pycompat.iteritems(actions):
999 for m, actionsfortype in pycompat.iteritems(actions):
1000 narrowactions[m] = []
1000 narrowactions[m] = []
1001 for (f, args, msg) in actionsfortype:
1001 for (f, args, msg) in actionsfortype:
1002 if narrowmatch(f):
1002 if narrowmatch(f):
1003 narrowactions[m].append((f, args, msg))
1003 narrowactions[m].append((f, args, msg))
1004 actions = narrowactions
1004 actions = narrowactions
1005
1005
1006 # build provisional merged manifest up
1006 # build provisional merged manifest up
1007 pmmf = set(wmf)
1007 pmmf = set(wmf)
1008
1008
1009 if actions:
1009 if actions:
1010 # KEEP and EXEC are no-op
1010 # KEEP and EXEC are no-op
1011 for m in (
1011 for m in (
1012 ACTION_ADD,
1012 ACTION_ADD,
1013 ACTION_ADD_MODIFIED,
1013 ACTION_ADD_MODIFIED,
1014 ACTION_FORGET,
1014 ACTION_FORGET,
1015 ACTION_GET,
1015 ACTION_GET,
1016 ACTION_CHANGED_DELETED,
1016 ACTION_CHANGED_DELETED,
1017 ACTION_DELETED_CHANGED,
1017 ACTION_DELETED_CHANGED,
1018 ):
1018 ):
1019 for f, args, msg in actions[m]:
1019 for f, args, msg in actions[m]:
1020 pmmf.add(f)
1020 pmmf.add(f)
1021 for f, args, msg in actions[ACTION_REMOVE]:
1021 for f, args, msg in actions[ACTION_REMOVE]:
1022 pmmf.discard(f)
1022 pmmf.discard(f)
1023 for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
1023 for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
1024 f2, flags = args
1024 f2, flags = args
1025 pmmf.discard(f2)
1025 pmmf.discard(f2)
1026 pmmf.add(f)
1026 pmmf.add(f)
1027 for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
1027 for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
1028 pmmf.add(f)
1028 pmmf.add(f)
1029 for f, args, msg in actions[ACTION_MERGE]:
1029 for f, args, msg in actions[ACTION_MERGE]:
1030 f1, f2, fa, move, anc = args
1030 f1, f2, fa, move, anc = args
1031 if move:
1031 if move:
1032 pmmf.discard(f1)
1032 pmmf.discard(f1)
1033 pmmf.add(f)
1033 pmmf.add(f)
1034
1034
1035 # check case-folding collision in provisional merged manifest
1035 # check case-folding collision in provisional merged manifest
1036 foldmap = {}
1036 foldmap = {}
1037 for f in pmmf:
1037 for f in pmmf:
1038 fold = util.normcase(f)
1038 fold = util.normcase(f)
1039 if fold in foldmap:
1039 if fold in foldmap:
1040 raise error.Abort(
1040 raise error.Abort(
1041 _(b"case-folding collision between %s and %s")
1041 _(b"case-folding collision between %s and %s")
1042 % (f, foldmap[fold])
1042 % (f, foldmap[fold])
1043 )
1043 )
1044 foldmap[fold] = f
1044 foldmap[fold] = f
1045
1045
1046 # check case-folding of directories
1046 # check case-folding of directories
1047 foldprefix = unfoldprefix = lastfull = b''
1047 foldprefix = unfoldprefix = lastfull = b''
1048 for fold, f in sorted(foldmap.items()):
1048 for fold, f in sorted(foldmap.items()):
1049 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
1049 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
1050 # the folded prefix matches but actual casing is different
1050 # the folded prefix matches but actual casing is different
1051 raise error.Abort(
1051 raise error.Abort(
1052 _(b"case-folding collision between %s and directory of %s")
1052 _(b"case-folding collision between %s and directory of %s")
1053 % (lastfull, f)
1053 % (lastfull, f)
1054 )
1054 )
1055 foldprefix = fold + b'/'
1055 foldprefix = fold + b'/'
1056 unfoldprefix = f + b'/'
1056 unfoldprefix = f + b'/'
1057 lastfull = f
1057 lastfull = f
1058
1058
1059
1059
1060 def driverpreprocess(repo, ms, wctx, labels=None):
1060 def driverpreprocess(repo, ms, wctx, labels=None):
1061 """run the preprocess step of the merge driver, if any
1061 """run the preprocess step of the merge driver, if any
1062
1062
1063 This is currently not implemented -- it's an extension point."""
1063 This is currently not implemented -- it's an extension point."""
1064 return True
1064 return True
1065
1065
1066
1066
1067 def driverconclude(repo, ms, wctx, labels=None):
1067 def driverconclude(repo, ms, wctx, labels=None):
1068 """run the conclude step of the merge driver, if any
1068 """run the conclude step of the merge driver, if any
1069
1069
1070 This is currently not implemented -- it's an extension point."""
1070 This is currently not implemented -- it's an extension point."""
1071 return True
1071 return True
1072
1072
1073
1073
1074 def _filesindirs(repo, manifest, dirs):
1074 def _filesindirs(repo, manifest, dirs):
1075 """
1075 """
1076 Generator that yields pairs of all the files in the manifest that are found
1076 Generator that yields pairs of all the files in the manifest that are found
1077 inside the directories listed in dirs, and which directory they are found
1077 inside the directories listed in dirs, and which directory they are found
1078 in.
1078 in.
1079 """
1079 """
1080 for f in manifest:
1080 for f in manifest:
1081 for p in pathutil.finddirs(f):
1081 for p in pathutil.finddirs(f):
1082 if p in dirs:
1082 if p in dirs:
1083 yield f, p
1083 yield f, p
1084 break
1084 break
1085
1085
1086
1086
1087 def checkpathconflicts(repo, wctx, mctx, actions):
1087 def checkpathconflicts(repo, wctx, mctx, actions):
1088 """
1088 """
1089 Check if any actions introduce path conflicts in the repository, updating
1089 Check if any actions introduce path conflicts in the repository, updating
1090 actions to record or handle the path conflict accordingly.
1090 actions to record or handle the path conflict accordingly.
1091 """
1091 """
1092 mf = wctx.manifest()
1092 mf = wctx.manifest()
1093
1093
1094 # The set of local files that conflict with a remote directory.
1094 # The set of local files that conflict with a remote directory.
1095 localconflicts = set()
1095 localconflicts = set()
1096
1096
1097 # The set of directories that conflict with a remote file, and so may cause
1097 # The set of directories that conflict with a remote file, and so may cause
1098 # conflicts if they still contain any files after the merge.
1098 # conflicts if they still contain any files after the merge.
1099 remoteconflicts = set()
1099 remoteconflicts = set()
1100
1100
1101 # The set of directories that appear as both a file and a directory in the
1101 # The set of directories that appear as both a file and a directory in the
1102 # remote manifest. These indicate an invalid remote manifest, which
1102 # remote manifest. These indicate an invalid remote manifest, which
1103 # can't be updated to cleanly.
1103 # can't be updated to cleanly.
1104 invalidconflicts = set()
1104 invalidconflicts = set()
1105
1105
1106 # The set of directories that contain files that are being created.
1106 # The set of directories that contain files that are being created.
1107 createdfiledirs = set()
1107 createdfiledirs = set()
1108
1108
1109 # The set of files deleted by all the actions.
1109 # The set of files deleted by all the actions.
1110 deletedfiles = set()
1110 deletedfiles = set()
1111
1111
1112 for f, (m, args, msg) in actions.items():
1112 for f, (m, args, msg) in actions.items():
1113 if m in (
1113 if m in (
1114 ACTION_CREATED,
1114 ACTION_CREATED,
1115 ACTION_DELETED_CHANGED,
1115 ACTION_DELETED_CHANGED,
1116 ACTION_MERGE,
1116 ACTION_MERGE,
1117 ACTION_CREATED_MERGE,
1117 ACTION_CREATED_MERGE,
1118 ):
1118 ):
1119 # This action may create a new local file.
1119 # This action may create a new local file.
1120 createdfiledirs.update(pathutil.finddirs(f))
1120 createdfiledirs.update(pathutil.finddirs(f))
1121 if mf.hasdir(f):
1121 if mf.hasdir(f):
1122 # The file aliases a local directory. This might be ok if all
1122 # The file aliases a local directory. This might be ok if all
1123 # the files in the local directory are being deleted. This
1123 # the files in the local directory are being deleted. This
1124 # will be checked once we know what all the deleted files are.
1124 # will be checked once we know what all the deleted files are.
1125 remoteconflicts.add(f)
1125 remoteconflicts.add(f)
1126 # Track the names of all deleted files.
1126 # Track the names of all deleted files.
1127 if m == ACTION_REMOVE:
1127 if m == ACTION_REMOVE:
1128 deletedfiles.add(f)
1128 deletedfiles.add(f)
1129 if m == ACTION_MERGE:
1129 if m == ACTION_MERGE:
1130 f1, f2, fa, move, anc = args
1130 f1, f2, fa, move, anc = args
1131 if move:
1131 if move:
1132 deletedfiles.add(f1)
1132 deletedfiles.add(f1)
1133 if m == ACTION_DIR_RENAME_MOVE_LOCAL:
1133 if m == ACTION_DIR_RENAME_MOVE_LOCAL:
1134 f2, flags = args
1134 f2, flags = args
1135 deletedfiles.add(f2)
1135 deletedfiles.add(f2)
1136
1136
1137 # Check all directories that contain created files for path conflicts.
1137 # Check all directories that contain created files for path conflicts.
1138 for p in createdfiledirs:
1138 for p in createdfiledirs:
1139 if p in mf:
1139 if p in mf:
1140 if p in mctx:
1140 if p in mctx:
1141 # A file is in a directory which aliases both a local
1141 # A file is in a directory which aliases both a local
1142 # and a remote file. This is an internal inconsistency
1142 # and a remote file. This is an internal inconsistency
1143 # within the remote manifest.
1143 # within the remote manifest.
1144 invalidconflicts.add(p)
1144 invalidconflicts.add(p)
1145 else:
1145 else:
1146 # A file is in a directory which aliases a local file.
1146 # A file is in a directory which aliases a local file.
1147 # We will need to rename the local file.
1147 # We will need to rename the local file.
1148 localconflicts.add(p)
1148 localconflicts.add(p)
1149 if p in actions and actions[p][0] in (
1149 if p in actions and actions[p][0] in (
1150 ACTION_CREATED,
1150 ACTION_CREATED,
1151 ACTION_DELETED_CHANGED,
1151 ACTION_DELETED_CHANGED,
1152 ACTION_MERGE,
1152 ACTION_MERGE,
1153 ACTION_CREATED_MERGE,
1153 ACTION_CREATED_MERGE,
1154 ):
1154 ):
1155 # The file is in a directory which aliases a remote file.
1155 # The file is in a directory which aliases a remote file.
1156 # This is an internal inconsistency within the remote
1156 # This is an internal inconsistency within the remote
1157 # manifest.
1157 # manifest.
1158 invalidconflicts.add(p)
1158 invalidconflicts.add(p)
1159
1159
1160 # Rename all local conflicting files that have not been deleted.
1160 # Rename all local conflicting files that have not been deleted.
1161 for p in localconflicts:
1161 for p in localconflicts:
1162 if p not in deletedfiles:
1162 if p not in deletedfiles:
1163 ctxname = bytes(wctx).rstrip(b'+')
1163 ctxname = bytes(wctx).rstrip(b'+')
1164 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
1164 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
1165 actions[pnew] = (
1165 actions[pnew] = (
1166 ACTION_PATH_CONFLICT_RESOLVE,
1166 ACTION_PATH_CONFLICT_RESOLVE,
1167 (p,),
1167 (p,),
1168 b'local path conflict',
1168 b'local path conflict',
1169 )
1169 )
1170 actions[p] = (ACTION_PATH_CONFLICT, (pnew, b'l'), b'path conflict')
1170 actions[p] = (ACTION_PATH_CONFLICT, (pnew, b'l'), b'path conflict')
1171
1171
1172 if remoteconflicts:
1172 if remoteconflicts:
1173 # Check if all files in the conflicting directories have been removed.
1173 # Check if all files in the conflicting directories have been removed.
1174 ctxname = bytes(mctx).rstrip(b'+')
1174 ctxname = bytes(mctx).rstrip(b'+')
1175 for f, p in _filesindirs(repo, mf, remoteconflicts):
1175 for f, p in _filesindirs(repo, mf, remoteconflicts):
1176 if f not in deletedfiles:
1176 if f not in deletedfiles:
1177 m, args, msg = actions[p]
1177 m, args, msg = actions[p]
1178 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
1178 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
1179 if m in (ACTION_DELETED_CHANGED, ACTION_MERGE):
1179 if m in (ACTION_DELETED_CHANGED, ACTION_MERGE):
1180 # Action was merge, just update target.
1180 # Action was merge, just update target.
1181 actions[pnew] = (m, args, msg)
1181 actions[pnew] = (m, args, msg)
1182 else:
1182 else:
1183 # Action was create, change to renamed get action.
1183 # Action was create, change to renamed get action.
1184 fl = args[0]
1184 fl = args[0]
1185 actions[pnew] = (
1185 actions[pnew] = (
1186 ACTION_LOCAL_DIR_RENAME_GET,
1186 ACTION_LOCAL_DIR_RENAME_GET,
1187 (p, fl),
1187 (p, fl),
1188 b'remote path conflict',
1188 b'remote path conflict',
1189 )
1189 )
1190 actions[p] = (
1190 actions[p] = (
1191 ACTION_PATH_CONFLICT,
1191 ACTION_PATH_CONFLICT,
1192 (pnew, ACTION_REMOVE),
1192 (pnew, ACTION_REMOVE),
1193 b'path conflict',
1193 b'path conflict',
1194 )
1194 )
1195 remoteconflicts.remove(p)
1195 remoteconflicts.remove(p)
1196 break
1196 break
1197
1197
1198 if invalidconflicts:
1198 if invalidconflicts:
1199 for p in invalidconflicts:
1199 for p in invalidconflicts:
1200 repo.ui.warn(_(b"%s: is both a file and a directory\n") % p)
1200 repo.ui.warn(_(b"%s: is both a file and a directory\n") % p)
1201 raise error.Abort(_(b"destination manifest contains path conflicts"))
1201 raise error.Abort(_(b"destination manifest contains path conflicts"))
1202
1202
1203
1203
1204 def _filternarrowactions(narrowmatch, branchmerge, actions):
1204 def _filternarrowactions(narrowmatch, branchmerge, actions):
1205 """
1205 """
1206 Filters out actions that can ignored because the repo is narrowed.
1206 Filters out actions that can ignored because the repo is narrowed.
1207
1207
1208 Raise an exception if the merge cannot be completed because the repo is
1208 Raise an exception if the merge cannot be completed because the repo is
1209 narrowed.
1209 narrowed.
1210 """
1210 """
1211 nooptypes = {b'k'} # TODO: handle with nonconflicttypes
1211 nooptypes = {b'k'} # TODO: handle with nonconflicttypes
1212 nonconflicttypes = set(b'a am c cm f g r e'.split())
1212 nonconflicttypes = set(b'a am c cm f g r e'.split())
1213 # We mutate the items in the dict during iteration, so iterate
1213 # We mutate the items in the dict during iteration, so iterate
1214 # over a copy.
1214 # over a copy.
1215 for f, action in list(actions.items()):
1215 for f, action in list(actions.items()):
1216 if narrowmatch(f):
1216 if narrowmatch(f):
1217 pass
1217 pass
1218 elif not branchmerge:
1218 elif not branchmerge:
1219 del actions[f] # just updating, ignore changes outside clone
1219 del actions[f] # just updating, ignore changes outside clone
1220 elif action[0] in nooptypes:
1220 elif action[0] in nooptypes:
1221 del actions[f] # merge does not affect file
1221 del actions[f] # merge does not affect file
1222 elif action[0] in nonconflicttypes:
1222 elif action[0] in nonconflicttypes:
1223 raise error.Abort(
1223 raise error.Abort(
1224 _(
1224 _(
1225 b'merge affects file \'%s\' outside narrow, '
1225 b'merge affects file \'%s\' outside narrow, '
1226 b'which is not yet supported'
1226 b'which is not yet supported'
1227 )
1227 )
1228 % f,
1228 % f,
1229 hint=_(b'merging in the other direction may work'),
1229 hint=_(b'merging in the other direction may work'),
1230 )
1230 )
1231 else:
1231 else:
1232 raise error.Abort(
1232 raise error.Abort(
1233 _(b'conflict in file \'%s\' is outside narrow clone') % f
1233 _(b'conflict in file \'%s\' is outside narrow clone') % f
1234 )
1234 )
1235
1235
1236
1236
1237 def manifestmerge(
1237 def manifestmerge(
1238 repo,
1238 repo,
1239 wctx,
1239 wctx,
1240 p2,
1240 p2,
1241 pa,
1241 pa,
1242 branchmerge,
1242 branchmerge,
1243 force,
1243 force,
1244 matcher,
1244 matcher,
1245 acceptremote,
1245 acceptremote,
1246 followcopies,
1246 followcopies,
1247 forcefulldiff=False,
1247 forcefulldiff=False,
1248 ):
1248 ):
1249 """
1249 """
1250 Merge wctx and p2 with ancestor pa and generate merge action list
1250 Merge wctx and p2 with ancestor pa and generate merge action list
1251
1251
1252 branchmerge and force are as passed in to update
1252 branchmerge and force are as passed in to update
1253 matcher = matcher to filter file lists
1253 matcher = matcher to filter file lists
1254 acceptremote = accept the incoming changes without prompting
1254 acceptremote = accept the incoming changes without prompting
1255 """
1255 """
1256 if matcher is not None and matcher.always():
1256 if matcher is not None and matcher.always():
1257 matcher = None
1257 matcher = None
1258
1258
1259 # manifests fetched in order are going to be faster, so prime the caches
1259 # manifests fetched in order are going to be faster, so prime the caches
1260 [
1260 [
1261 x.manifest()
1261 x.manifest()
1262 for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
1262 for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
1263 ]
1263 ]
1264
1264
1265 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
1265 branch_copies1 = copies.branch_copies()
1266 branch_copies2 = copies.branch_copies()
1267 diverge = {}
1266 if followcopies:
1268 if followcopies:
1267 branch_copies, diverge = copies.mergecopies(repo, wctx, p2, pa)
1269 branch_copies1, branch_copies2, diverge = copies.mergecopies(
1268 copy = branch_copies.copy
1270 repo, wctx, p2, pa
1269 renamedelete = branch_copies.renamedelete
1271 )
1270 dirmove = branch_copies.dirmove
1271 movewithdir = branch_copies.movewithdir
1272
1272
1273 boolbm = pycompat.bytestr(bool(branchmerge))
1273 boolbm = pycompat.bytestr(bool(branchmerge))
1274 boolf = pycompat.bytestr(bool(force))
1274 boolf = pycompat.bytestr(bool(force))
1275 boolm = pycompat.bytestr(bool(matcher))
1275 boolm = pycompat.bytestr(bool(matcher))
1276 repo.ui.note(_(b"resolving manifests\n"))
1276 repo.ui.note(_(b"resolving manifests\n"))
1277 repo.ui.debug(
1277 repo.ui.debug(
1278 b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm)
1278 b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm)
1279 )
1279 )
1280 repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
1280 repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
1281
1281
1282 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
1282 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
1283 copied = set(copy.values())
1283 copied1 = set(branch_copies1.copy.values())
1284 copied.update(movewithdir.values())
1284 copied1.update(branch_copies1.movewithdir.values())
1285 copied2 = set(branch_copies2.copy.values())
1286 copied2.update(branch_copies2.movewithdir.values())
1285
1287
1286 if b'.hgsubstate' in m1 and wctx.rev() is None:
1288 if b'.hgsubstate' in m1 and wctx.rev() is None:
1287 # Check whether sub state is modified, and overwrite the manifest
1289 # Check whether sub state is modified, and overwrite the manifest
1288 # to flag the change. If wctx is a committed revision, we shouldn't
1290 # to flag the change. If wctx is a committed revision, we shouldn't
1289 # care for the dirty state of the working directory.
1291 # care for the dirty state of the working directory.
1290 if any(wctx.sub(s).dirty() for s in wctx.substate):
1292 if any(wctx.sub(s).dirty() for s in wctx.substate):
1291 m1[b'.hgsubstate'] = modifiednodeid
1293 m1[b'.hgsubstate'] = modifiednodeid
1292
1294
1293 # Don't use m2-vs-ma optimization if:
1295 # Don't use m2-vs-ma optimization if:
1294 # - ma is the same as m1 or m2, which we're just going to diff again later
1296 # - ma is the same as m1 or m2, which we're just going to diff again later
1295 # - The caller specifically asks for a full diff, which is useful during bid
1297 # - The caller specifically asks for a full diff, which is useful during bid
1296 # merge.
1298 # merge.
1297 if pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff:
1299 if pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff:
1298 # Identify which files are relevant to the merge, so we can limit the
1300 # Identify which files are relevant to the merge, so we can limit the
1299 # total m1-vs-m2 diff to just those files. This has significant
1301 # total m1-vs-m2 diff to just those files. This has significant
1300 # performance benefits in large repositories.
1302 # performance benefits in large repositories.
1301 relevantfiles = set(ma.diff(m2).keys())
1303 relevantfiles = set(ma.diff(m2).keys())
1302
1304
1303 # For copied and moved files, we need to add the source file too.
1305 # For copied and moved files, we need to add the source file too.
1304 for copykey, copyvalue in pycompat.iteritems(copy):
1306 for copykey, copyvalue in pycompat.iteritems(branch_copies1.copy):
1305 if copyvalue in relevantfiles:
1307 if copyvalue in relevantfiles:
1306 relevantfiles.add(copykey)
1308 relevantfiles.add(copykey)
1307 for movedirkey in movewithdir:
1309 for movedirkey in branch_copies1.movewithdir:
1308 relevantfiles.add(movedirkey)
1310 relevantfiles.add(movedirkey)
1309 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
1311 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
1310 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
1312 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
1311
1313
1312 diff = m1.diff(m2, match=matcher)
1314 diff = m1.diff(m2, match=matcher)
1313
1315
1314 actions = {}
1316 actions = {}
1315 for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
1317 for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
1316 if n1 and n2: # file exists on both local and remote side
1318 if n1 and n2: # file exists on both local and remote side
1317 if f not in ma:
1319 if f not in ma:
1318 fa = copy.get(f, None)
1320 # TODO: what if they're renamed from different sources?
1321 fa = branch_copies1.copy.get(
1322 f, None
1323 ) or branch_copies2.copy.get(f, None)
1319 if fa is not None:
1324 if fa is not None:
1320 actions[f] = (
1325 actions[f] = (
1321 ACTION_MERGE,
1326 ACTION_MERGE,
1322 (f, f, fa, False, pa.node()),
1327 (f, f, fa, False, pa.node()),
1323 b'both renamed from %s' % fa,
1328 b'both renamed from %s' % fa,
1324 )
1329 )
1325 else:
1330 else:
1326 actions[f] = (
1331 actions[f] = (
1327 ACTION_MERGE,
1332 ACTION_MERGE,
1328 (f, f, None, False, pa.node()),
1333 (f, f, None, False, pa.node()),
1329 b'both created',
1334 b'both created',
1330 )
1335 )
1331 else:
1336 else:
1332 a = ma[f]
1337 a = ma[f]
1333 fla = ma.flags(f)
1338 fla = ma.flags(f)
1334 nol = b'l' not in fl1 + fl2 + fla
1339 nol = b'l' not in fl1 + fl2 + fla
1335 if n2 == a and fl2 == fla:
1340 if n2 == a and fl2 == fla:
1336 actions[f] = (ACTION_KEEP, (), b'remote unchanged')
1341 actions[f] = (ACTION_KEEP, (), b'remote unchanged')
1337 elif n1 == a and fl1 == fla: # local unchanged - use remote
1342 elif n1 == a and fl1 == fla: # local unchanged - use remote
1338 if n1 == n2: # optimization: keep local content
1343 if n1 == n2: # optimization: keep local content
1339 actions[f] = (
1344 actions[f] = (
1340 ACTION_EXEC,
1345 ACTION_EXEC,
1341 (fl2,),
1346 (fl2,),
1342 b'update permissions',
1347 b'update permissions',
1343 )
1348 )
1344 else:
1349 else:
1345 actions[f] = (
1350 actions[f] = (
1346 ACTION_GET,
1351 ACTION_GET,
1347 (fl2, False),
1352 (fl2, False),
1348 b'remote is newer',
1353 b'remote is newer',
1349 )
1354 )
1350 elif nol and n2 == a: # remote only changed 'x'
1355 elif nol and n2 == a: # remote only changed 'x'
1351 actions[f] = (ACTION_EXEC, (fl2,), b'update permissions')
1356 actions[f] = (ACTION_EXEC, (fl2,), b'update permissions')
1352 elif nol and n1 == a: # local only changed 'x'
1357 elif nol and n1 == a: # local only changed 'x'
1353 actions[f] = (ACTION_GET, (fl1, False), b'remote is newer')
1358 actions[f] = (ACTION_GET, (fl1, False), b'remote is newer')
1354 else: # both changed something
1359 else: # both changed something
1355 actions[f] = (
1360 actions[f] = (
1356 ACTION_MERGE,
1361 ACTION_MERGE,
1357 (f, f, f, False, pa.node()),
1362 (f, f, f, False, pa.node()),
1358 b'versions differ',
1363 b'versions differ',
1359 )
1364 )
1360 elif n1: # file exists only on local side
1365 elif n1: # file exists only on local side
1361 if f in copied:
1366 if f in copied2:
1362 pass # we'll deal with it on m2 side
1367 pass # we'll deal with it on m2 side
1363 elif f in movewithdir: # directory rename, move local
1368 elif (
1364 f2 = movewithdir[f]
1369 f in branch_copies1.movewithdir
1370 ): # directory rename, move local
1371 f2 = branch_copies1.movewithdir[f]
1365 if f2 in m2:
1372 if f2 in m2:
1366 actions[f2] = (
1373 actions[f2] = (
1367 ACTION_MERGE,
1374 ACTION_MERGE,
1368 (f, f2, None, True, pa.node()),
1375 (f, f2, None, True, pa.node()),
1369 b'remote directory rename, both created',
1376 b'remote directory rename, both created',
1370 )
1377 )
1371 else:
1378 else:
1372 actions[f2] = (
1379 actions[f2] = (
1373 ACTION_DIR_RENAME_MOVE_LOCAL,
1380 ACTION_DIR_RENAME_MOVE_LOCAL,
1374 (f, fl1),
1381 (f, fl1),
1375 b'remote directory rename - move from %s' % f,
1382 b'remote directory rename - move from %s' % f,
1376 )
1383 )
1377 elif f in copy:
1384 elif f in branch_copies1.copy:
1378 f2 = copy[f]
1385 f2 = branch_copies1.copy[f]
1379 actions[f] = (
1386 actions[f] = (
1380 ACTION_MERGE,
1387 ACTION_MERGE,
1381 (f, f2, f2, False, pa.node()),
1388 (f, f2, f2, False, pa.node()),
1382 b'local copied/moved from %s' % f2,
1389 b'local copied/moved from %s' % f2,
1383 )
1390 )
1384 elif f in ma: # clean, a different, no remote
1391 elif f in ma: # clean, a different, no remote
1385 if n1 != ma[f]:
1392 if n1 != ma[f]:
1386 if acceptremote:
1393 if acceptremote:
1387 actions[f] = (ACTION_REMOVE, None, b'remote delete')
1394 actions[f] = (ACTION_REMOVE, None, b'remote delete')
1388 else:
1395 else:
1389 actions[f] = (
1396 actions[f] = (
1390 ACTION_CHANGED_DELETED,
1397 ACTION_CHANGED_DELETED,
1391 (f, None, f, False, pa.node()),
1398 (f, None, f, False, pa.node()),
1392 b'prompt changed/deleted',
1399 b'prompt changed/deleted',
1393 )
1400 )
1394 elif n1 == addednodeid:
1401 elif n1 == addednodeid:
1395 # This extra 'a' is added by working copy manifest to mark
1402 # This extra 'a' is added by working copy manifest to mark
1396 # the file as locally added. We should forget it instead of
1403 # the file as locally added. We should forget it instead of
1397 # deleting it.
1404 # deleting it.
1398 actions[f] = (ACTION_FORGET, None, b'remote deleted')
1405 actions[f] = (ACTION_FORGET, None, b'remote deleted')
1399 else:
1406 else:
1400 actions[f] = (ACTION_REMOVE, None, b'other deleted')
1407 actions[f] = (ACTION_REMOVE, None, b'other deleted')
1401 elif n2: # file exists only on remote side
1408 elif n2: # file exists only on remote side
1402 if f in copied:
1409 if f in copied1:
1403 pass # we'll deal with it on m1 side
1410 pass # we'll deal with it on m1 side
1404 elif f in movewithdir:
1411 elif f in branch_copies2.movewithdir:
1405 f2 = movewithdir[f]
1412 f2 = branch_copies2.movewithdir[f]
1406 if f2 in m1:
1413 if f2 in m1:
1407 actions[f2] = (
1414 actions[f2] = (
1408 ACTION_MERGE,
1415 ACTION_MERGE,
1409 (f2, f, None, False, pa.node()),
1416 (f2, f, None, False, pa.node()),
1410 b'local directory rename, both created',
1417 b'local directory rename, both created',
1411 )
1418 )
1412 else:
1419 else:
1413 actions[f2] = (
1420 actions[f2] = (
1414 ACTION_LOCAL_DIR_RENAME_GET,
1421 ACTION_LOCAL_DIR_RENAME_GET,
1415 (f, fl2),
1422 (f, fl2),
1416 b'local directory rename - get from %s' % f,
1423 b'local directory rename - get from %s' % f,
1417 )
1424 )
1418 elif f in copy:
1425 elif f in branch_copies2.copy:
1419 f2 = copy[f]
1426 f2 = branch_copies2.copy[f]
1420 if f2 in m2:
1427 if f2 in m2:
1421 actions[f] = (
1428 actions[f] = (
1422 ACTION_MERGE,
1429 ACTION_MERGE,
1423 (f2, f, f2, False, pa.node()),
1430 (f2, f, f2, False, pa.node()),
1424 b'remote copied from %s' % f2,
1431 b'remote copied from %s' % f2,
1425 )
1432 )
1426 else:
1433 else:
1427 actions[f] = (
1434 actions[f] = (
1428 ACTION_MERGE,
1435 ACTION_MERGE,
1429 (f2, f, f2, True, pa.node()),
1436 (f2, f, f2, True, pa.node()),
1430 b'remote moved from %s' % f2,
1437 b'remote moved from %s' % f2,
1431 )
1438 )
1432 elif f not in ma:
1439 elif f not in ma:
1433 # local unknown, remote created: the logic is described by the
1440 # local unknown, remote created: the logic is described by the
1434 # following table:
1441 # following table:
1435 #
1442 #
1436 # force branchmerge different | action
1443 # force branchmerge different | action
1437 # n * * | create
1444 # n * * | create
1438 # y n * | create
1445 # y n * | create
1439 # y y n | create
1446 # y y n | create
1440 # y y y | merge
1447 # y y y | merge
1441 #
1448 #
1442 # Checking whether the files are different is expensive, so we
1449 # Checking whether the files are different is expensive, so we
1443 # don't do that when we can avoid it.
1450 # don't do that when we can avoid it.
1444 if not force:
1451 if not force:
1445 actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
1452 actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
1446 elif not branchmerge:
1453 elif not branchmerge:
1447 actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
1454 actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
1448 else:
1455 else:
1449 actions[f] = (
1456 actions[f] = (
1450 ACTION_CREATED_MERGE,
1457 ACTION_CREATED_MERGE,
1451 (fl2, pa.node()),
1458 (fl2, pa.node()),
1452 b'remote created, get or merge',
1459 b'remote created, get or merge',
1453 )
1460 )
1454 elif n2 != ma[f]:
1461 elif n2 != ma[f]:
1455 df = None
1462 df = None
1456 for d in dirmove:
1463 for d in branch_copies1.dirmove:
1457 if f.startswith(d):
1464 if f.startswith(d):
1458 # new file added in a directory that was moved
1465 # new file added in a directory that was moved
1459 df = dirmove[d] + f[len(d) :]
1466 df = branch_copies1.dirmove[d] + f[len(d) :]
1460 break
1467 break
1461 if df is not None and df in m1:
1468 if df is not None and df in m1:
1462 actions[df] = (
1469 actions[df] = (
1463 ACTION_MERGE,
1470 ACTION_MERGE,
1464 (df, f, f, False, pa.node()),
1471 (df, f, f, False, pa.node()),
1465 b'local directory rename - respect move '
1472 b'local directory rename - respect move '
1466 b'from %s' % f,
1473 b'from %s' % f,
1467 )
1474 )
1468 elif acceptremote:
1475 elif acceptremote:
1469 actions[f] = (ACTION_CREATED, (fl2,), b'remote recreating')
1476 actions[f] = (ACTION_CREATED, (fl2,), b'remote recreating')
1470 else:
1477 else:
1471 actions[f] = (
1478 actions[f] = (
1472 ACTION_DELETED_CHANGED,
1479 ACTION_DELETED_CHANGED,
1473 (None, f, f, False, pa.node()),
1480 (None, f, f, False, pa.node()),
1474 b'prompt deleted/changed',
1481 b'prompt deleted/changed',
1475 )
1482 )
1476
1483
1477 if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
1484 if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
1478 # If we are merging, look for path conflicts.
1485 # If we are merging, look for path conflicts.
1479 checkpathconflicts(repo, wctx, p2, actions)
1486 checkpathconflicts(repo, wctx, p2, actions)
1480
1487
1481 narrowmatch = repo.narrowmatch()
1488 narrowmatch = repo.narrowmatch()
1482 if not narrowmatch.always():
1489 if not narrowmatch.always():
1483 # Updates "actions" in place
1490 # Updates "actions" in place
1484 _filternarrowactions(narrowmatch, branchmerge, actions)
1491 _filternarrowactions(narrowmatch, branchmerge, actions)
1485
1492
1493 renamedelete = branch_copies1.renamedelete
1494 renamedelete.update(branch_copies2.renamedelete)
1495
1486 return actions, diverge, renamedelete
1496 return actions, diverge, renamedelete
1487
1497
1488
1498
1489 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
1499 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
1490 """Resolves false conflicts where the nodeid changed but the content
1500 """Resolves false conflicts where the nodeid changed but the content
1491 remained the same."""
1501 remained the same."""
1492 # We force a copy of actions.items() because we're going to mutate
1502 # We force a copy of actions.items() because we're going to mutate
1493 # actions as we resolve trivial conflicts.
1503 # actions as we resolve trivial conflicts.
1494 for f, (m, args, msg) in list(actions.items()):
1504 for f, (m, args, msg) in list(actions.items()):
1495 if (
1505 if (
1496 m == ACTION_CHANGED_DELETED
1506 m == ACTION_CHANGED_DELETED
1497 and f in ancestor
1507 and f in ancestor
1498 and not wctx[f].cmp(ancestor[f])
1508 and not wctx[f].cmp(ancestor[f])
1499 ):
1509 ):
1500 # local did change but ended up with same content
1510 # local did change but ended up with same content
1501 actions[f] = ACTION_REMOVE, None, b'prompt same'
1511 actions[f] = ACTION_REMOVE, None, b'prompt same'
1502 elif (
1512 elif (
1503 m == ACTION_DELETED_CHANGED
1513 m == ACTION_DELETED_CHANGED
1504 and f in ancestor
1514 and f in ancestor
1505 and not mctx[f].cmp(ancestor[f])
1515 and not mctx[f].cmp(ancestor[f])
1506 ):
1516 ):
1507 # remote did change but ended up with same content
1517 # remote did change but ended up with same content
1508 del actions[f] # don't get = keep local deleted
1518 del actions[f] # don't get = keep local deleted
1509
1519
1510
1520
1511 def calculateupdates(
1521 def calculateupdates(
1512 repo,
1522 repo,
1513 wctx,
1523 wctx,
1514 mctx,
1524 mctx,
1515 ancestors,
1525 ancestors,
1516 branchmerge,
1526 branchmerge,
1517 force,
1527 force,
1518 acceptremote,
1528 acceptremote,
1519 followcopies,
1529 followcopies,
1520 matcher=None,
1530 matcher=None,
1521 mergeforce=False,
1531 mergeforce=False,
1522 ):
1532 ):
1523 """Calculate the actions needed to merge mctx into wctx using ancestors"""
1533 """Calculate the actions needed to merge mctx into wctx using ancestors"""
1524 # Avoid cycle.
1534 # Avoid cycle.
1525 from . import sparse
1535 from . import sparse
1526
1536
1527 if len(ancestors) == 1: # default
1537 if len(ancestors) == 1: # default
1528 actions, diverge, renamedelete = manifestmerge(
1538 actions, diverge, renamedelete = manifestmerge(
1529 repo,
1539 repo,
1530 wctx,
1540 wctx,
1531 mctx,
1541 mctx,
1532 ancestors[0],
1542 ancestors[0],
1533 branchmerge,
1543 branchmerge,
1534 force,
1544 force,
1535 matcher,
1545 matcher,
1536 acceptremote,
1546 acceptremote,
1537 followcopies,
1547 followcopies,
1538 )
1548 )
1539 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1549 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1540
1550
1541 else: # only when merge.preferancestor=* - the default
1551 else: # only when merge.preferancestor=* - the default
1542 repo.ui.note(
1552 repo.ui.note(
1543 _(b"note: merging %s and %s using bids from ancestors %s\n")
1553 _(b"note: merging %s and %s using bids from ancestors %s\n")
1544 % (
1554 % (
1545 wctx,
1555 wctx,
1546 mctx,
1556 mctx,
1547 _(b' and ').join(pycompat.bytestr(anc) for anc in ancestors),
1557 _(b' and ').join(pycompat.bytestr(anc) for anc in ancestors),
1548 )
1558 )
1549 )
1559 )
1550
1560
1551 # Call for bids
1561 # Call for bids
1552 fbids = (
1562 fbids = (
1553 {}
1563 {}
1554 ) # mapping filename to bids (action method to list af actions)
1564 ) # mapping filename to bids (action method to list af actions)
1555 diverge, renamedelete = None, None
1565 diverge, renamedelete = None, None
1556 for ancestor in ancestors:
1566 for ancestor in ancestors:
1557 repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor)
1567 repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor)
1558 actions, diverge1, renamedelete1 = manifestmerge(
1568 actions, diverge1, renamedelete1 = manifestmerge(
1559 repo,
1569 repo,
1560 wctx,
1570 wctx,
1561 mctx,
1571 mctx,
1562 ancestor,
1572 ancestor,
1563 branchmerge,
1573 branchmerge,
1564 force,
1574 force,
1565 matcher,
1575 matcher,
1566 acceptremote,
1576 acceptremote,
1567 followcopies,
1577 followcopies,
1568 forcefulldiff=True,
1578 forcefulldiff=True,
1569 )
1579 )
1570 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1580 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1571
1581
1572 # Track the shortest set of warning on the theory that bid
1582 # Track the shortest set of warning on the theory that bid
1573 # merge will correctly incorporate more information
1583 # merge will correctly incorporate more information
1574 if diverge is None or len(diverge1) < len(diverge):
1584 if diverge is None or len(diverge1) < len(diverge):
1575 diverge = diverge1
1585 diverge = diverge1
1576 if renamedelete is None or len(renamedelete) < len(renamedelete1):
1586 if renamedelete is None or len(renamedelete) < len(renamedelete1):
1577 renamedelete = renamedelete1
1587 renamedelete = renamedelete1
1578
1588
1579 for f, a in sorted(pycompat.iteritems(actions)):
1589 for f, a in sorted(pycompat.iteritems(actions)):
1580 m, args, msg = a
1590 m, args, msg = a
1581 repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m))
1591 repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m))
1582 if f in fbids:
1592 if f in fbids:
1583 d = fbids[f]
1593 d = fbids[f]
1584 if m in d:
1594 if m in d:
1585 d[m].append(a)
1595 d[m].append(a)
1586 else:
1596 else:
1587 d[m] = [a]
1597 d[m] = [a]
1588 else:
1598 else:
1589 fbids[f] = {m: [a]}
1599 fbids[f] = {m: [a]}
1590
1600
1591 # Pick the best bid for each file
1601 # Pick the best bid for each file
1592 repo.ui.note(_(b'\nauction for merging merge bids\n'))
1602 repo.ui.note(_(b'\nauction for merging merge bids\n'))
1593 actions = {}
1603 actions = {}
1594 for f, bids in sorted(fbids.items()):
1604 for f, bids in sorted(fbids.items()):
1595 # bids is a mapping from action method to list af actions
1605 # bids is a mapping from action method to list af actions
1596 # Consensus?
1606 # Consensus?
1597 if len(bids) == 1: # all bids are the same kind of method
1607 if len(bids) == 1: # all bids are the same kind of method
1598 m, l = list(bids.items())[0]
1608 m, l = list(bids.items())[0]
1599 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1609 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1600 repo.ui.note(_(b" %s: consensus for %s\n") % (f, m))
1610 repo.ui.note(_(b" %s: consensus for %s\n") % (f, m))
1601 actions[f] = l[0]
1611 actions[f] = l[0]
1602 continue
1612 continue
1603 # If keep is an option, just do it.
1613 # If keep is an option, just do it.
1604 if ACTION_KEEP in bids:
1614 if ACTION_KEEP in bids:
1605 repo.ui.note(_(b" %s: picking 'keep' action\n") % f)
1615 repo.ui.note(_(b" %s: picking 'keep' action\n") % f)
1606 actions[f] = bids[ACTION_KEEP][0]
1616 actions[f] = bids[ACTION_KEEP][0]
1607 continue
1617 continue
1608 # If there are gets and they all agree [how could they not?], do it.
1618 # If there are gets and they all agree [how could they not?], do it.
1609 if ACTION_GET in bids:
1619 if ACTION_GET in bids:
1610 ga0 = bids[ACTION_GET][0]
1620 ga0 = bids[ACTION_GET][0]
1611 if all(a == ga0 for a in bids[ACTION_GET][1:]):
1621 if all(a == ga0 for a in bids[ACTION_GET][1:]):
1612 repo.ui.note(_(b" %s: picking 'get' action\n") % f)
1622 repo.ui.note(_(b" %s: picking 'get' action\n") % f)
1613 actions[f] = ga0
1623 actions[f] = ga0
1614 continue
1624 continue
1615 # TODO: Consider other simple actions such as mode changes
1625 # TODO: Consider other simple actions such as mode changes
1616 # Handle inefficient democrazy.
1626 # Handle inefficient democrazy.
1617 repo.ui.note(_(b' %s: multiple bids for merge action:\n') % f)
1627 repo.ui.note(_(b' %s: multiple bids for merge action:\n') % f)
1618 for m, l in sorted(bids.items()):
1628 for m, l in sorted(bids.items()):
1619 for _f, args, msg in l:
1629 for _f, args, msg in l:
1620 repo.ui.note(b' %s -> %s\n' % (msg, m))
1630 repo.ui.note(b' %s -> %s\n' % (msg, m))
1621 # Pick random action. TODO: Instead, prompt user when resolving
1631 # Pick random action. TODO: Instead, prompt user when resolving
1622 m, l = list(bids.items())[0]
1632 m, l = list(bids.items())[0]
1623 repo.ui.warn(
1633 repo.ui.warn(
1624 _(b' %s: ambiguous merge - picked %s action\n') % (f, m)
1634 _(b' %s: ambiguous merge - picked %s action\n') % (f, m)
1625 )
1635 )
1626 actions[f] = l[0]
1636 actions[f] = l[0]
1627 continue
1637 continue
1628 repo.ui.note(_(b'end of auction\n\n'))
1638 repo.ui.note(_(b'end of auction\n\n'))
1629
1639
1630 if wctx.rev() is None:
1640 if wctx.rev() is None:
1631 fractions = _forgetremoved(wctx, mctx, branchmerge)
1641 fractions = _forgetremoved(wctx, mctx, branchmerge)
1632 actions.update(fractions)
1642 actions.update(fractions)
1633
1643
1634 prunedactions = sparse.filterupdatesactions(
1644 prunedactions = sparse.filterupdatesactions(
1635 repo, wctx, mctx, branchmerge, actions
1645 repo, wctx, mctx, branchmerge, actions
1636 )
1646 )
1637 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1647 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1638
1648
1639 return prunedactions, diverge, renamedelete
1649 return prunedactions, diverge, renamedelete
1640
1650
1641
1651
1642 def _getcwd():
1652 def _getcwd():
1643 try:
1653 try:
1644 return encoding.getcwd()
1654 return encoding.getcwd()
1645 except OSError as err:
1655 except OSError as err:
1646 if err.errno == errno.ENOENT:
1656 if err.errno == errno.ENOENT:
1647 return None
1657 return None
1648 raise
1658 raise
1649
1659
1650
1660
1651 def batchremove(repo, wctx, actions):
1661 def batchremove(repo, wctx, actions):
1652 """apply removes to the working directory
1662 """apply removes to the working directory
1653
1663
1654 yields tuples for progress updates
1664 yields tuples for progress updates
1655 """
1665 """
1656 verbose = repo.ui.verbose
1666 verbose = repo.ui.verbose
1657 cwd = _getcwd()
1667 cwd = _getcwd()
1658 i = 0
1668 i = 0
1659 for f, args, msg in actions:
1669 for f, args, msg in actions:
1660 repo.ui.debug(b" %s: %s -> r\n" % (f, msg))
1670 repo.ui.debug(b" %s: %s -> r\n" % (f, msg))
1661 if verbose:
1671 if verbose:
1662 repo.ui.note(_(b"removing %s\n") % f)
1672 repo.ui.note(_(b"removing %s\n") % f)
1663 wctx[f].audit()
1673 wctx[f].audit()
1664 try:
1674 try:
1665 wctx[f].remove(ignoremissing=True)
1675 wctx[f].remove(ignoremissing=True)
1666 except OSError as inst:
1676 except OSError as inst:
1667 repo.ui.warn(
1677 repo.ui.warn(
1668 _(b"update failed to remove %s: %s!\n") % (f, inst.strerror)
1678 _(b"update failed to remove %s: %s!\n") % (f, inst.strerror)
1669 )
1679 )
1670 if i == 100:
1680 if i == 100:
1671 yield i, f
1681 yield i, f
1672 i = 0
1682 i = 0
1673 i += 1
1683 i += 1
1674 if i > 0:
1684 if i > 0:
1675 yield i, f
1685 yield i, f
1676
1686
1677 if cwd and not _getcwd():
1687 if cwd and not _getcwd():
1678 # cwd was removed in the course of removing files; print a helpful
1688 # cwd was removed in the course of removing files; print a helpful
1679 # warning.
1689 # warning.
1680 repo.ui.warn(
1690 repo.ui.warn(
1681 _(
1691 _(
1682 b"current directory was removed\n"
1692 b"current directory was removed\n"
1683 b"(consider changing to repo root: %s)\n"
1693 b"(consider changing to repo root: %s)\n"
1684 )
1694 )
1685 % repo.root
1695 % repo.root
1686 )
1696 )
1687
1697
1688
1698
1689 def batchget(repo, mctx, wctx, wantfiledata, actions):
1699 def batchget(repo, mctx, wctx, wantfiledata, actions):
1690 """apply gets to the working directory
1700 """apply gets to the working directory
1691
1701
1692 mctx is the context to get from
1702 mctx is the context to get from
1693
1703
1694 Yields arbitrarily many (False, tuple) for progress updates, followed by
1704 Yields arbitrarily many (False, tuple) for progress updates, followed by
1695 exactly one (True, filedata). When wantfiledata is false, filedata is an
1705 exactly one (True, filedata). When wantfiledata is false, filedata is an
1696 empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size,
1706 empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size,
1697 mtime) of the file f written for each action.
1707 mtime) of the file f written for each action.
1698 """
1708 """
1699 filedata = {}
1709 filedata = {}
1700 verbose = repo.ui.verbose
1710 verbose = repo.ui.verbose
1701 fctx = mctx.filectx
1711 fctx = mctx.filectx
1702 ui = repo.ui
1712 ui = repo.ui
1703 i = 0
1713 i = 0
1704 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1714 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1705 for f, (flags, backup), msg in actions:
1715 for f, (flags, backup), msg in actions:
1706 repo.ui.debug(b" %s: %s -> g\n" % (f, msg))
1716 repo.ui.debug(b" %s: %s -> g\n" % (f, msg))
1707 if verbose:
1717 if verbose:
1708 repo.ui.note(_(b"getting %s\n") % f)
1718 repo.ui.note(_(b"getting %s\n") % f)
1709
1719
1710 if backup:
1720 if backup:
1711 # If a file or directory exists with the same name, back that
1721 # If a file or directory exists with the same name, back that
1712 # up. Otherwise, look to see if there is a file that conflicts
1722 # up. Otherwise, look to see if there is a file that conflicts
1713 # with a directory this file is in, and if so, back that up.
1723 # with a directory this file is in, and if so, back that up.
1714 conflicting = f
1724 conflicting = f
1715 if not repo.wvfs.lexists(f):
1725 if not repo.wvfs.lexists(f):
1716 for p in pathutil.finddirs(f):
1726 for p in pathutil.finddirs(f):
1717 if repo.wvfs.isfileorlink(p):
1727 if repo.wvfs.isfileorlink(p):
1718 conflicting = p
1728 conflicting = p
1719 break
1729 break
1720 if repo.wvfs.lexists(conflicting):
1730 if repo.wvfs.lexists(conflicting):
1721 orig = scmutil.backuppath(ui, repo, conflicting)
1731 orig = scmutil.backuppath(ui, repo, conflicting)
1722 util.rename(repo.wjoin(conflicting), orig)
1732 util.rename(repo.wjoin(conflicting), orig)
1723 wfctx = wctx[f]
1733 wfctx = wctx[f]
1724 wfctx.clearunknown()
1734 wfctx.clearunknown()
1725 atomictemp = ui.configbool(b"experimental", b"update.atomic-file")
1735 atomictemp = ui.configbool(b"experimental", b"update.atomic-file")
1726 size = wfctx.write(
1736 size = wfctx.write(
1727 fctx(f).data(),
1737 fctx(f).data(),
1728 flags,
1738 flags,
1729 backgroundclose=True,
1739 backgroundclose=True,
1730 atomictemp=atomictemp,
1740 atomictemp=atomictemp,
1731 )
1741 )
1732 if wantfiledata:
1742 if wantfiledata:
1733 s = wfctx.lstat()
1743 s = wfctx.lstat()
1734 mode = s.st_mode
1744 mode = s.st_mode
1735 mtime = s[stat.ST_MTIME]
1745 mtime = s[stat.ST_MTIME]
1736 filedata[f] = (mode, size, mtime) # for dirstate.normal
1746 filedata[f] = (mode, size, mtime) # for dirstate.normal
1737 if i == 100:
1747 if i == 100:
1738 yield False, (i, f)
1748 yield False, (i, f)
1739 i = 0
1749 i = 0
1740 i += 1
1750 i += 1
1741 if i > 0:
1751 if i > 0:
1742 yield False, (i, f)
1752 yield False, (i, f)
1743 yield True, filedata
1753 yield True, filedata
1744
1754
1745
1755
1746 def _prefetchfiles(repo, ctx, actions):
1756 def _prefetchfiles(repo, ctx, actions):
1747 """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
1757 """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
1748 of merge actions. ``ctx`` is the context being merged in."""
1758 of merge actions. ``ctx`` is the context being merged in."""
1749
1759
1750 # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
1760 # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
1751 # don't touch the context to be merged in. 'cd' is skipped, because
1761 # don't touch the context to be merged in. 'cd' is skipped, because
1752 # changed/deleted never resolves to something from the remote side.
1762 # changed/deleted never resolves to something from the remote side.
1753 oplist = [
1763 oplist = [
1754 actions[a]
1764 actions[a]
1755 for a in (
1765 for a in (
1756 ACTION_GET,
1766 ACTION_GET,
1757 ACTION_DELETED_CHANGED,
1767 ACTION_DELETED_CHANGED,
1758 ACTION_LOCAL_DIR_RENAME_GET,
1768 ACTION_LOCAL_DIR_RENAME_GET,
1759 ACTION_MERGE,
1769 ACTION_MERGE,
1760 )
1770 )
1761 ]
1771 ]
1762 prefetch = scmutil.prefetchfiles
1772 prefetch = scmutil.prefetchfiles
1763 matchfiles = scmutil.matchfiles
1773 matchfiles = scmutil.matchfiles
1764 prefetch(
1774 prefetch(
1765 repo,
1775 repo,
1766 [ctx.rev()],
1776 [ctx.rev()],
1767 matchfiles(repo, [f for sublist in oplist for f, args, msg in sublist]),
1777 matchfiles(repo, [f for sublist in oplist for f, args, msg in sublist]),
1768 )
1778 )
1769
1779
1770
1780
1771 @attr.s(frozen=True)
1781 @attr.s(frozen=True)
1772 class updateresult(object):
1782 class updateresult(object):
1773 updatedcount = attr.ib()
1783 updatedcount = attr.ib()
1774 mergedcount = attr.ib()
1784 mergedcount = attr.ib()
1775 removedcount = attr.ib()
1785 removedcount = attr.ib()
1776 unresolvedcount = attr.ib()
1786 unresolvedcount = attr.ib()
1777
1787
1778 def isempty(self):
1788 def isempty(self):
1779 return not (
1789 return not (
1780 self.updatedcount
1790 self.updatedcount
1781 or self.mergedcount
1791 or self.mergedcount
1782 or self.removedcount
1792 or self.removedcount
1783 or self.unresolvedcount
1793 or self.unresolvedcount
1784 )
1794 )
1785
1795
1786
1796
1787 def emptyactions():
1797 def emptyactions():
1788 """create an actions dict, to be populated and passed to applyupdates()"""
1798 """create an actions dict, to be populated and passed to applyupdates()"""
1789 return dict(
1799 return dict(
1790 (m, [])
1800 (m, [])
1791 for m in (
1801 for m in (
1792 ACTION_ADD,
1802 ACTION_ADD,
1793 ACTION_ADD_MODIFIED,
1803 ACTION_ADD_MODIFIED,
1794 ACTION_FORGET,
1804 ACTION_FORGET,
1795 ACTION_GET,
1805 ACTION_GET,
1796 ACTION_CHANGED_DELETED,
1806 ACTION_CHANGED_DELETED,
1797 ACTION_DELETED_CHANGED,
1807 ACTION_DELETED_CHANGED,
1798 ACTION_REMOVE,
1808 ACTION_REMOVE,
1799 ACTION_DIR_RENAME_MOVE_LOCAL,
1809 ACTION_DIR_RENAME_MOVE_LOCAL,
1800 ACTION_LOCAL_DIR_RENAME_GET,
1810 ACTION_LOCAL_DIR_RENAME_GET,
1801 ACTION_MERGE,
1811 ACTION_MERGE,
1802 ACTION_EXEC,
1812 ACTION_EXEC,
1803 ACTION_KEEP,
1813 ACTION_KEEP,
1804 ACTION_PATH_CONFLICT,
1814 ACTION_PATH_CONFLICT,
1805 ACTION_PATH_CONFLICT_RESOLVE,
1815 ACTION_PATH_CONFLICT_RESOLVE,
1806 )
1816 )
1807 )
1817 )
1808
1818
1809
1819
1810 def applyupdates(
1820 def applyupdates(
1811 repo, actions, wctx, mctx, overwrite, wantfiledata, labels=None
1821 repo, actions, wctx, mctx, overwrite, wantfiledata, labels=None
1812 ):
1822 ):
1813 """apply the merge action list to the working directory
1823 """apply the merge action list to the working directory
1814
1824
1815 wctx is the working copy context
1825 wctx is the working copy context
1816 mctx is the context to be merged into the working copy
1826 mctx is the context to be merged into the working copy
1817
1827
1818 Return a tuple of (counts, filedata), where counts is a tuple
1828 Return a tuple of (counts, filedata), where counts is a tuple
1819 (updated, merged, removed, unresolved) that describes how many
1829 (updated, merged, removed, unresolved) that describes how many
1820 files were affected by the update, and filedata is as described in
1830 files were affected by the update, and filedata is as described in
1821 batchget.
1831 batchget.
1822 """
1832 """
1823
1833
1824 _prefetchfiles(repo, mctx, actions)
1834 _prefetchfiles(repo, mctx, actions)
1825
1835
1826 updated, merged, removed = 0, 0, 0
1836 updated, merged, removed = 0, 0, 0
1827 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1837 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1828 moves = []
1838 moves = []
1829 for m, l in actions.items():
1839 for m, l in actions.items():
1830 l.sort()
1840 l.sort()
1831
1841
1832 # 'cd' and 'dc' actions are treated like other merge conflicts
1842 # 'cd' and 'dc' actions are treated like other merge conflicts
1833 mergeactions = sorted(actions[ACTION_CHANGED_DELETED])
1843 mergeactions = sorted(actions[ACTION_CHANGED_DELETED])
1834 mergeactions.extend(sorted(actions[ACTION_DELETED_CHANGED]))
1844 mergeactions.extend(sorted(actions[ACTION_DELETED_CHANGED]))
1835 mergeactions.extend(actions[ACTION_MERGE])
1845 mergeactions.extend(actions[ACTION_MERGE])
1836 for f, args, msg in mergeactions:
1846 for f, args, msg in mergeactions:
1837 f1, f2, fa, move, anc = args
1847 f1, f2, fa, move, anc = args
1838 if f == b'.hgsubstate': # merged internally
1848 if f == b'.hgsubstate': # merged internally
1839 continue
1849 continue
1840 if f1 is None:
1850 if f1 is None:
1841 fcl = filemerge.absentfilectx(wctx, fa)
1851 fcl = filemerge.absentfilectx(wctx, fa)
1842 else:
1852 else:
1843 repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f))
1853 repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f))
1844 fcl = wctx[f1]
1854 fcl = wctx[f1]
1845 if f2 is None:
1855 if f2 is None:
1846 fco = filemerge.absentfilectx(mctx, fa)
1856 fco = filemerge.absentfilectx(mctx, fa)
1847 else:
1857 else:
1848 fco = mctx[f2]
1858 fco = mctx[f2]
1849 actx = repo[anc]
1859 actx = repo[anc]
1850 if fa in actx:
1860 if fa in actx:
1851 fca = actx[fa]
1861 fca = actx[fa]
1852 else:
1862 else:
1853 # TODO: move to absentfilectx
1863 # TODO: move to absentfilectx
1854 fca = repo.filectx(f1, fileid=nullrev)
1864 fca = repo.filectx(f1, fileid=nullrev)
1855 ms.add(fcl, fco, fca, f)
1865 ms.add(fcl, fco, fca, f)
1856 if f1 != f and move:
1866 if f1 != f and move:
1857 moves.append(f1)
1867 moves.append(f1)
1858
1868
1859 # remove renamed files after safely stored
1869 # remove renamed files after safely stored
1860 for f in moves:
1870 for f in moves:
1861 if wctx[f].lexists():
1871 if wctx[f].lexists():
1862 repo.ui.debug(b"removing %s\n" % f)
1872 repo.ui.debug(b"removing %s\n" % f)
1863 wctx[f].audit()
1873 wctx[f].audit()
1864 wctx[f].remove()
1874 wctx[f].remove()
1865
1875
1866 numupdates = sum(len(l) for m, l in actions.items() if m != ACTION_KEEP)
1876 numupdates = sum(len(l) for m, l in actions.items() if m != ACTION_KEEP)
1867 progress = repo.ui.makeprogress(
1877 progress = repo.ui.makeprogress(
1868 _(b'updating'), unit=_(b'files'), total=numupdates
1878 _(b'updating'), unit=_(b'files'), total=numupdates
1869 )
1879 )
1870
1880
1871 if [a for a in actions[ACTION_REMOVE] if a[0] == b'.hgsubstate']:
1881 if [a for a in actions[ACTION_REMOVE] if a[0] == b'.hgsubstate']:
1872 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1882 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1873
1883
1874 # record path conflicts
1884 # record path conflicts
1875 for f, args, msg in actions[ACTION_PATH_CONFLICT]:
1885 for f, args, msg in actions[ACTION_PATH_CONFLICT]:
1876 f1, fo = args
1886 f1, fo = args
1877 s = repo.ui.status
1887 s = repo.ui.status
1878 s(
1888 s(
1879 _(
1889 _(
1880 b"%s: path conflict - a file or link has the same name as a "
1890 b"%s: path conflict - a file or link has the same name as a "
1881 b"directory\n"
1891 b"directory\n"
1882 )
1892 )
1883 % f
1893 % f
1884 )
1894 )
1885 if fo == b'l':
1895 if fo == b'l':
1886 s(_(b"the local file has been renamed to %s\n") % f1)
1896 s(_(b"the local file has been renamed to %s\n") % f1)
1887 else:
1897 else:
1888 s(_(b"the remote file has been renamed to %s\n") % f1)
1898 s(_(b"the remote file has been renamed to %s\n") % f1)
1889 s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
1899 s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
1890 ms.addpath(f, f1, fo)
1900 ms.addpath(f, f1, fo)
1891 progress.increment(item=f)
1901 progress.increment(item=f)
1892
1902
1893 # When merging in-memory, we can't support worker processes, so set the
1903 # When merging in-memory, we can't support worker processes, so set the
1894 # per-item cost at 0 in that case.
1904 # per-item cost at 0 in that case.
1895 cost = 0 if wctx.isinmemory() else 0.001
1905 cost = 0 if wctx.isinmemory() else 0.001
1896
1906
1897 # remove in parallel (must come before resolving path conflicts and getting)
1907 # remove in parallel (must come before resolving path conflicts and getting)
1898 prog = worker.worker(
1908 prog = worker.worker(
1899 repo.ui, cost, batchremove, (repo, wctx), actions[ACTION_REMOVE]
1909 repo.ui, cost, batchremove, (repo, wctx), actions[ACTION_REMOVE]
1900 )
1910 )
1901 for i, item in prog:
1911 for i, item in prog:
1902 progress.increment(step=i, item=item)
1912 progress.increment(step=i, item=item)
1903 removed = len(actions[ACTION_REMOVE])
1913 removed = len(actions[ACTION_REMOVE])
1904
1914
1905 # resolve path conflicts (must come before getting)
1915 # resolve path conflicts (must come before getting)
1906 for f, args, msg in actions[ACTION_PATH_CONFLICT_RESOLVE]:
1916 for f, args, msg in actions[ACTION_PATH_CONFLICT_RESOLVE]:
1907 repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
1917 repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
1908 (f0,) = args
1918 (f0,) = args
1909 if wctx[f0].lexists():
1919 if wctx[f0].lexists():
1910 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1920 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1911 wctx[f].audit()
1921 wctx[f].audit()
1912 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1922 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1913 wctx[f0].remove()
1923 wctx[f0].remove()
1914 progress.increment(item=f)
1924 progress.increment(item=f)
1915
1925
1916 # get in parallel.
1926 # get in parallel.
1917 threadsafe = repo.ui.configbool(
1927 threadsafe = repo.ui.configbool(
1918 b'experimental', b'worker.wdir-get-thread-safe'
1928 b'experimental', b'worker.wdir-get-thread-safe'
1919 )
1929 )
1920 prog = worker.worker(
1930 prog = worker.worker(
1921 repo.ui,
1931 repo.ui,
1922 cost,
1932 cost,
1923 batchget,
1933 batchget,
1924 (repo, mctx, wctx, wantfiledata),
1934 (repo, mctx, wctx, wantfiledata),
1925 actions[ACTION_GET],
1935 actions[ACTION_GET],
1926 threadsafe=threadsafe,
1936 threadsafe=threadsafe,
1927 hasretval=True,
1937 hasretval=True,
1928 )
1938 )
1929 getfiledata = {}
1939 getfiledata = {}
1930 for final, res in prog:
1940 for final, res in prog:
1931 if final:
1941 if final:
1932 getfiledata = res
1942 getfiledata = res
1933 else:
1943 else:
1934 i, item = res
1944 i, item = res
1935 progress.increment(step=i, item=item)
1945 progress.increment(step=i, item=item)
1936 updated = len(actions[ACTION_GET])
1946 updated = len(actions[ACTION_GET])
1937
1947
1938 if [a for a in actions[ACTION_GET] if a[0] == b'.hgsubstate']:
1948 if [a for a in actions[ACTION_GET] if a[0] == b'.hgsubstate']:
1939 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1949 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1940
1950
1941 # forget (manifest only, just log it) (must come first)
1951 # forget (manifest only, just log it) (must come first)
1942 for f, args, msg in actions[ACTION_FORGET]:
1952 for f, args, msg in actions[ACTION_FORGET]:
1943 repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
1953 repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
1944 progress.increment(item=f)
1954 progress.increment(item=f)
1945
1955
1946 # re-add (manifest only, just log it)
1956 # re-add (manifest only, just log it)
1947 for f, args, msg in actions[ACTION_ADD]:
1957 for f, args, msg in actions[ACTION_ADD]:
1948 repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
1958 repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
1949 progress.increment(item=f)
1959 progress.increment(item=f)
1950
1960
1951 # re-add/mark as modified (manifest only, just log it)
1961 # re-add/mark as modified (manifest only, just log it)
1952 for f, args, msg in actions[ACTION_ADD_MODIFIED]:
1962 for f, args, msg in actions[ACTION_ADD_MODIFIED]:
1953 repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
1963 repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
1954 progress.increment(item=f)
1964 progress.increment(item=f)
1955
1965
1956 # keep (noop, just log it)
1966 # keep (noop, just log it)
1957 for f, args, msg in actions[ACTION_KEEP]:
1967 for f, args, msg in actions[ACTION_KEEP]:
1958 repo.ui.debug(b" %s: %s -> k\n" % (f, msg))
1968 repo.ui.debug(b" %s: %s -> k\n" % (f, msg))
1959 # no progress
1969 # no progress
1960
1970
1961 # directory rename, move local
1971 # directory rename, move local
1962 for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
1972 for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
1963 repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
1973 repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
1964 progress.increment(item=f)
1974 progress.increment(item=f)
1965 f0, flags = args
1975 f0, flags = args
1966 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1976 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1967 wctx[f].audit()
1977 wctx[f].audit()
1968 wctx[f].write(wctx.filectx(f0).data(), flags)
1978 wctx[f].write(wctx.filectx(f0).data(), flags)
1969 wctx[f0].remove()
1979 wctx[f0].remove()
1970 updated += 1
1980 updated += 1
1971
1981
1972 # local directory rename, get
1982 # local directory rename, get
1973 for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
1983 for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
1974 repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
1984 repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
1975 progress.increment(item=f)
1985 progress.increment(item=f)
1976 f0, flags = args
1986 f0, flags = args
1977 repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
1987 repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
1978 wctx[f].write(mctx.filectx(f0).data(), flags)
1988 wctx[f].write(mctx.filectx(f0).data(), flags)
1979 updated += 1
1989 updated += 1
1980
1990
1981 # exec
1991 # exec
1982 for f, args, msg in actions[ACTION_EXEC]:
1992 for f, args, msg in actions[ACTION_EXEC]:
1983 repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
1993 repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
1984 progress.increment(item=f)
1994 progress.increment(item=f)
1985 (flags,) = args
1995 (flags,) = args
1986 wctx[f].audit()
1996 wctx[f].audit()
1987 wctx[f].setflags(b'l' in flags, b'x' in flags)
1997 wctx[f].setflags(b'l' in flags, b'x' in flags)
1988 updated += 1
1998 updated += 1
1989
1999
1990 # the ordering is important here -- ms.mergedriver will raise if the merge
2000 # the ordering is important here -- ms.mergedriver will raise if the merge
1991 # driver has changed, and we want to be able to bypass it when overwrite is
2001 # driver has changed, and we want to be able to bypass it when overwrite is
1992 # True
2002 # True
1993 usemergedriver = not overwrite and mergeactions and ms.mergedriver
2003 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1994
2004
1995 if usemergedriver:
2005 if usemergedriver:
1996 if wctx.isinmemory():
2006 if wctx.isinmemory():
1997 raise error.InMemoryMergeConflictsError(
2007 raise error.InMemoryMergeConflictsError(
1998 b"in-memory merge does not support mergedriver"
2008 b"in-memory merge does not support mergedriver"
1999 )
2009 )
2000 ms.commit()
2010 ms.commit()
2001 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
2011 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
2002 # the driver might leave some files unresolved
2012 # the driver might leave some files unresolved
2003 unresolvedf = set(ms.unresolved())
2013 unresolvedf = set(ms.unresolved())
2004 if not proceed:
2014 if not proceed:
2005 # XXX setting unresolved to at least 1 is a hack to make sure we
2015 # XXX setting unresolved to at least 1 is a hack to make sure we
2006 # error out
2016 # error out
2007 return updateresult(
2017 return updateresult(
2008 updated, merged, removed, max(len(unresolvedf), 1)
2018 updated, merged, removed, max(len(unresolvedf), 1)
2009 )
2019 )
2010 newactions = []
2020 newactions = []
2011 for f, args, msg in mergeactions:
2021 for f, args, msg in mergeactions:
2012 if f in unresolvedf:
2022 if f in unresolvedf:
2013 newactions.append((f, args, msg))
2023 newactions.append((f, args, msg))
2014 mergeactions = newactions
2024 mergeactions = newactions
2015
2025
2016 try:
2026 try:
2017 # premerge
2027 # premerge
2018 tocomplete = []
2028 tocomplete = []
2019 for f, args, msg in mergeactions:
2029 for f, args, msg in mergeactions:
2020 repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg))
2030 repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg))
2021 progress.increment(item=f)
2031 progress.increment(item=f)
2022 if f == b'.hgsubstate': # subrepo states need updating
2032 if f == b'.hgsubstate': # subrepo states need updating
2023 subrepoutil.submerge(
2033 subrepoutil.submerge(
2024 repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels
2034 repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels
2025 )
2035 )
2026 continue
2036 continue
2027 wctx[f].audit()
2037 wctx[f].audit()
2028 complete, r = ms.preresolve(f, wctx)
2038 complete, r = ms.preresolve(f, wctx)
2029 if not complete:
2039 if not complete:
2030 numupdates += 1
2040 numupdates += 1
2031 tocomplete.append((f, args, msg))
2041 tocomplete.append((f, args, msg))
2032
2042
2033 # merge
2043 # merge
2034 for f, args, msg in tocomplete:
2044 for f, args, msg in tocomplete:
2035 repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg))
2045 repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg))
2036 progress.increment(item=f, total=numupdates)
2046 progress.increment(item=f, total=numupdates)
2037 ms.resolve(f, wctx)
2047 ms.resolve(f, wctx)
2038
2048
2039 finally:
2049 finally:
2040 ms.commit()
2050 ms.commit()
2041
2051
2042 unresolved = ms.unresolvedcount()
2052 unresolved = ms.unresolvedcount()
2043
2053
2044 if (
2054 if (
2045 usemergedriver
2055 usemergedriver
2046 and not unresolved
2056 and not unresolved
2047 and ms.mdstate() != MERGE_DRIVER_STATE_SUCCESS
2057 and ms.mdstate() != MERGE_DRIVER_STATE_SUCCESS
2048 ):
2058 ):
2049 if not driverconclude(repo, ms, wctx, labels=labels):
2059 if not driverconclude(repo, ms, wctx, labels=labels):
2050 # XXX setting unresolved to at least 1 is a hack to make sure we
2060 # XXX setting unresolved to at least 1 is a hack to make sure we
2051 # error out
2061 # error out
2052 unresolved = max(unresolved, 1)
2062 unresolved = max(unresolved, 1)
2053
2063
2054 ms.commit()
2064 ms.commit()
2055
2065
2056 msupdated, msmerged, msremoved = ms.counts()
2066 msupdated, msmerged, msremoved = ms.counts()
2057 updated += msupdated
2067 updated += msupdated
2058 merged += msmerged
2068 merged += msmerged
2059 removed += msremoved
2069 removed += msremoved
2060
2070
2061 extraactions = ms.actions()
2071 extraactions = ms.actions()
2062 if extraactions:
2072 if extraactions:
2063 mfiles = set(a[0] for a in actions[ACTION_MERGE])
2073 mfiles = set(a[0] for a in actions[ACTION_MERGE])
2064 for k, acts in pycompat.iteritems(extraactions):
2074 for k, acts in pycompat.iteritems(extraactions):
2065 actions[k].extend(acts)
2075 actions[k].extend(acts)
2066 if k == ACTION_GET and wantfiledata:
2076 if k == ACTION_GET and wantfiledata:
2067 # no filedata until mergestate is updated to provide it
2077 # no filedata until mergestate is updated to provide it
2068 for a in acts:
2078 for a in acts:
2069 getfiledata[a[0]] = None
2079 getfiledata[a[0]] = None
2070 # Remove these files from actions[ACTION_MERGE] as well. This is
2080 # Remove these files from actions[ACTION_MERGE] as well. This is
2071 # important because in recordupdates, files in actions[ACTION_MERGE]
2081 # important because in recordupdates, files in actions[ACTION_MERGE]
2072 # are processed after files in other actions, and the merge driver
2082 # are processed after files in other actions, and the merge driver
2073 # might add files to those actions via extraactions above. This can
2083 # might add files to those actions via extraactions above. This can
2074 # lead to a file being recorded twice, with poor results. This is
2084 # lead to a file being recorded twice, with poor results. This is
2075 # especially problematic for actions[ACTION_REMOVE] (currently only
2085 # especially problematic for actions[ACTION_REMOVE] (currently only
2076 # possible with the merge driver in the initial merge process;
2086 # possible with the merge driver in the initial merge process;
2077 # interrupted merges don't go through this flow).
2087 # interrupted merges don't go through this flow).
2078 #
2088 #
2079 # The real fix here is to have indexes by both file and action so
2089 # The real fix here is to have indexes by both file and action so
2080 # that when the action for a file is changed it is automatically
2090 # that when the action for a file is changed it is automatically
2081 # reflected in the other action lists. But that involves a more
2091 # reflected in the other action lists. But that involves a more
2082 # complex data structure, so this will do for now.
2092 # complex data structure, so this will do for now.
2083 #
2093 #
2084 # We don't need to do the same operation for 'dc' and 'cd' because
2094 # We don't need to do the same operation for 'dc' and 'cd' because
2085 # those lists aren't consulted again.
2095 # those lists aren't consulted again.
2086 mfiles.difference_update(a[0] for a in acts)
2096 mfiles.difference_update(a[0] for a in acts)
2087
2097
2088 actions[ACTION_MERGE] = [
2098 actions[ACTION_MERGE] = [
2089 a for a in actions[ACTION_MERGE] if a[0] in mfiles
2099 a for a in actions[ACTION_MERGE] if a[0] in mfiles
2090 ]
2100 ]
2091
2101
2092 progress.complete()
2102 progress.complete()
2093 assert len(getfiledata) == (len(actions[ACTION_GET]) if wantfiledata else 0)
2103 assert len(getfiledata) == (len(actions[ACTION_GET]) if wantfiledata else 0)
2094 return updateresult(updated, merged, removed, unresolved), getfiledata
2104 return updateresult(updated, merged, removed, unresolved), getfiledata
2095
2105
2096
2106
def recordupdates(repo, actions, branchmerge, getfiledata):
    """record merge actions to the dirstate"""
    # removals must be recorded before anything else
    for path, details, note in actions.get(ACTION_REMOVE, []):
        (repo.dirstate.remove if branchmerge else repo.dirstate.drop)(path)

    # forgotten files also come first
    for path, details, note in actions.get(ACTION_FORGET, []):
        repo.dirstate.drop(path)

    # path conflicts that were resolved by renaming one side
    for path, details, note in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
        (src,) = details
        origin = repo.dirstate.copied(src) or src
        repo.dirstate.add(path)
        repo.dirstate.copy(origin, path)
        if src == origin:
            repo.dirstate.remove(src)
        else:
            repo.dirstate.drop(src)

    # files added back
    for path, details, note in actions.get(ACTION_ADD, []):
        repo.dirstate.add(path)

    # files added back, or marked as locally modified
    for path, details, note in actions.get(ACTION_ADD_MODIFIED, []):
        (repo.dirstate.normallookup if branchmerge else repo.dirstate.add)(path)

    # executable-bit (flag) changes
    for path, details, note in actions.get(ACTION_EXEC, []):
        repo.dirstate.normallookup(path)

    # kept files need no dirstate change
    for path, details, note in actions.get(ACTION_KEEP, []):
        pass

    # files fetched from the other side
    for path, details, note in actions.get(ACTION_GET, []):
        if branchmerge:
            repo.dirstate.otherparent(path)
        else:
            filedata = getfiledata[path] if getfiledata else None
            repo.dirstate.normal(path, parentfiledata=filedata)

    # merged files
    for path, details, note in actions.get(ACTION_MERGE, []):
        local, other, ancestorpath, moved, anc = details
        if branchmerge:
            # We've done a branch merge: mark this file as merged so that
            # the merger is properly recorded later.
            repo.dirstate.merge(path)
            if local != other:  # copy/rename
                if moved:
                    repo.dirstate.remove(local)
                if local != path:
                    repo.dirstate.copy(local, path)
                else:
                    repo.dirstate.copy(other, path)
        else:
            # We've update-merged a locally modified file: set the dirstate
            # to emulate a normal checkout of that file some time in the
            # past, so our merge appears as a plain local modification.
            if other == path:  # file not locally copied/moved
                repo.dirstate.normallookup(path)
            if moved:
                repo.dirstate.drop(local)

    # directory rename: move the local file into the renamed directory
    for path, details, note in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
        src, flag = details
        if branchmerge:
            repo.dirstate.add(path)
            repo.dirstate.remove(src)
            repo.dirstate.copy(src, path)
        else:
            repo.dirstate.normal(path)
            repo.dirstate.drop(src)

    # directory rename: get the file from the other side into it
    for path, details, note in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
        src, flag = details
        if branchmerge:
            repo.dirstate.add(path)
            repo.dirstate.copy(src, path)
        else:
            repo.dirstate.normal(path)
2193
2203
# Valid values for the ``updatecheck`` argument to update() (and for the
# ``experimental.updatecheck`` config option).  UPDATECHECK_ABORT is handled
# at higher layers; update() itself only accepts the other three values.
UPDATECHECK_ABORT = b'abort'  # handled at higher layers
UPDATECHECK_NONE = b'none'
UPDATECHECK_LINEAR = b'linear'
UPDATECHECK_NO_CONFLICT = b'noconflict'
2198
2208
2199
2209
2200 def update(
2210 def update(
2201 repo,
2211 repo,
2202 node,
2212 node,
2203 branchmerge,
2213 branchmerge,
2204 force,
2214 force,
2205 ancestor=None,
2215 ancestor=None,
2206 mergeancestor=False,
2216 mergeancestor=False,
2207 labels=None,
2217 labels=None,
2208 matcher=None,
2218 matcher=None,
2209 mergeforce=False,
2219 mergeforce=False,
2210 updatecheck=None,
2220 updatecheck=None,
2211 wc=None,
2221 wc=None,
2212 ):
2222 ):
2213 """
2223 """
2214 Perform a merge between the working directory and the given node
2224 Perform a merge between the working directory and the given node
2215
2225
2216 node = the node to update to
2226 node = the node to update to
2217 branchmerge = whether to merge between branches
2227 branchmerge = whether to merge between branches
2218 force = whether to force branch merging or file overwriting
2228 force = whether to force branch merging or file overwriting
2219 matcher = a matcher to filter file lists (dirstate not updated)
2229 matcher = a matcher to filter file lists (dirstate not updated)
2220 mergeancestor = whether it is merging with an ancestor. If true,
2230 mergeancestor = whether it is merging with an ancestor. If true,
2221 we should accept the incoming changes for any prompts that occur.
2231 we should accept the incoming changes for any prompts that occur.
2222 If false, merging with an ancestor (fast-forward) is only allowed
2232 If false, merging with an ancestor (fast-forward) is only allowed
2223 between different named branches. This flag is used by rebase extension
2233 between different named branches. This flag is used by rebase extension
2224 as a temporary fix and should be avoided in general.
2234 as a temporary fix and should be avoided in general.
2225 labels = labels to use for base, local and other
2235 labels = labels to use for base, local and other
2226 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
2236 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
2227 this is True, then 'force' should be True as well.
2237 this is True, then 'force' should be True as well.
2228
2238
2229 The table below shows all the behaviors of the update command given the
2239 The table below shows all the behaviors of the update command given the
2230 -c/--check and -C/--clean or no options, whether the working directory is
2240 -c/--check and -C/--clean or no options, whether the working directory is
2231 dirty, whether a revision is specified, and the relationship of the parent
2241 dirty, whether a revision is specified, and the relationship of the parent
2232 rev to the target rev (linear or not). Match from top first. The -n
2242 rev to the target rev (linear or not). Match from top first. The -n
2233 option doesn't exist on the command line, but represents the
2243 option doesn't exist on the command line, but represents the
2234 experimental.updatecheck=noconflict option.
2244 experimental.updatecheck=noconflict option.
2235
2245
2236 This logic is tested by test-update-branches.t.
2246 This logic is tested by test-update-branches.t.
2237
2247
2238 -c -C -n -m dirty rev linear | result
2248 -c -C -n -m dirty rev linear | result
2239 y y * * * * * | (1)
2249 y y * * * * * | (1)
2240 y * y * * * * | (1)
2250 y * y * * * * | (1)
2241 y * * y * * * | (1)
2251 y * * y * * * | (1)
2242 * y y * * * * | (1)
2252 * y y * * * * | (1)
2243 * y * y * * * | (1)
2253 * y * y * * * | (1)
2244 * * y y * * * | (1)
2254 * * y y * * * | (1)
2245 * * * * * n n | x
2255 * * * * * n n | x
2246 * * * * n * * | ok
2256 * * * * n * * | ok
2247 n n n n y * y | merge
2257 n n n n y * y | merge
2248 n n n n y y n | (2)
2258 n n n n y y n | (2)
2249 n n n y y * * | merge
2259 n n n y y * * | merge
2250 n n y n y * * | merge if no conflict
2260 n n y n y * * | merge if no conflict
2251 n y n n y * * | discard
2261 n y n n y * * | discard
2252 y n n n y * * | (3)
2262 y n n n y * * | (3)
2253
2263
2254 x = can't happen
2264 x = can't happen
2255 * = don't-care
2265 * = don't-care
2256 1 = incompatible options (checked in commands.py)
2266 1 = incompatible options (checked in commands.py)
2257 2 = abort: uncommitted changes (commit or update --clean to discard changes)
2267 2 = abort: uncommitted changes (commit or update --clean to discard changes)
2258 3 = abort: uncommitted changes (checked in commands.py)
2268 3 = abort: uncommitted changes (checked in commands.py)
2259
2269
2260 The merge is performed inside ``wc``, a workingctx-like objects. It defaults
2270 The merge is performed inside ``wc``, a workingctx-like objects. It defaults
2261 to repo[None] if None is passed.
2271 to repo[None] if None is passed.
2262
2272
2263 Return the same tuple as applyupdates().
2273 Return the same tuple as applyupdates().
2264 """
2274 """
2265 # Avoid cycle.
2275 # Avoid cycle.
2266 from . import sparse
2276 from . import sparse
2267
2277
2268 # This function used to find the default destination if node was None, but
2278 # This function used to find the default destination if node was None, but
2269 # that's now in destutil.py.
2279 # that's now in destutil.py.
2270 assert node is not None
2280 assert node is not None
2271 if not branchmerge and not force:
2281 if not branchmerge and not force:
2272 # TODO: remove the default once all callers that pass branchmerge=False
2282 # TODO: remove the default once all callers that pass branchmerge=False
2273 # and force=False pass a value for updatecheck. We may want to allow
2283 # and force=False pass a value for updatecheck. We may want to allow
2274 # updatecheck='abort' to better support some of these callers.
2284 # updatecheck='abort' to better support some of these callers.
2275 if updatecheck is None:
2285 if updatecheck is None:
2276 updatecheck = UPDATECHECK_LINEAR
2286 updatecheck = UPDATECHECK_LINEAR
2277 if updatecheck not in (
2287 if updatecheck not in (
2278 UPDATECHECK_NONE,
2288 UPDATECHECK_NONE,
2279 UPDATECHECK_LINEAR,
2289 UPDATECHECK_LINEAR,
2280 UPDATECHECK_NO_CONFLICT,
2290 UPDATECHECK_NO_CONFLICT,
2281 ):
2291 ):
2282 raise ValueError(
2292 raise ValueError(
2283 r'Invalid updatecheck %r (can accept %r)'
2293 r'Invalid updatecheck %r (can accept %r)'
2284 % (
2294 % (
2285 updatecheck,
2295 updatecheck,
2286 (
2296 (
2287 UPDATECHECK_NONE,
2297 UPDATECHECK_NONE,
2288 UPDATECHECK_LINEAR,
2298 UPDATECHECK_LINEAR,
2289 UPDATECHECK_NO_CONFLICT,
2299 UPDATECHECK_NO_CONFLICT,
2290 ),
2300 ),
2291 )
2301 )
2292 )
2302 )
2293 with repo.wlock():
2303 with repo.wlock():
2294 if wc is None:
2304 if wc is None:
2295 wc = repo[None]
2305 wc = repo[None]
2296 pl = wc.parents()
2306 pl = wc.parents()
2297 p1 = pl[0]
2307 p1 = pl[0]
2298 p2 = repo[node]
2308 p2 = repo[node]
2299 if ancestor is not None:
2309 if ancestor is not None:
2300 pas = [repo[ancestor]]
2310 pas = [repo[ancestor]]
2301 else:
2311 else:
2302 if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
2312 if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
2303 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
2313 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
2304 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
2314 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
2305 else:
2315 else:
2306 pas = [p1.ancestor(p2, warn=branchmerge)]
2316 pas = [p1.ancestor(p2, warn=branchmerge)]
2307
2317
2308 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
2318 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
2309
2319
2310 overwrite = force and not branchmerge
2320 overwrite = force and not branchmerge
2311 ### check phase
2321 ### check phase
2312 if not overwrite:
2322 if not overwrite:
2313 if len(pl) > 1:
2323 if len(pl) > 1:
2314 raise error.Abort(_(b"outstanding uncommitted merge"))
2324 raise error.Abort(_(b"outstanding uncommitted merge"))
2315 ms = mergestate.read(repo)
2325 ms = mergestate.read(repo)
2316 if list(ms.unresolved()):
2326 if list(ms.unresolved()):
2317 raise error.Abort(
2327 raise error.Abort(
2318 _(b"outstanding merge conflicts"),
2328 _(b"outstanding merge conflicts"),
2319 hint=_(b"use 'hg resolve' to resolve"),
2329 hint=_(b"use 'hg resolve' to resolve"),
2320 )
2330 )
2321 if branchmerge:
2331 if branchmerge:
2322 if pas == [p2]:
2332 if pas == [p2]:
2323 raise error.Abort(
2333 raise error.Abort(
2324 _(
2334 _(
2325 b"merging with a working directory ancestor"
2335 b"merging with a working directory ancestor"
2326 b" has no effect"
2336 b" has no effect"
2327 )
2337 )
2328 )
2338 )
2329 elif pas == [p1]:
2339 elif pas == [p1]:
2330 if not mergeancestor and wc.branch() == p2.branch():
2340 if not mergeancestor and wc.branch() == p2.branch():
2331 raise error.Abort(
2341 raise error.Abort(
2332 _(b"nothing to merge"),
2342 _(b"nothing to merge"),
2333 hint=_(b"use 'hg update' or check 'hg heads'"),
2343 hint=_(b"use 'hg update' or check 'hg heads'"),
2334 )
2344 )
2335 if not force and (wc.files() or wc.deleted()):
2345 if not force and (wc.files() or wc.deleted()):
2336 raise error.Abort(
2346 raise error.Abort(
2337 _(b"uncommitted changes"),
2347 _(b"uncommitted changes"),
2338 hint=_(b"use 'hg status' to list changes"),
2348 hint=_(b"use 'hg status' to list changes"),
2339 )
2349 )
2340 if not wc.isinmemory():
2350 if not wc.isinmemory():
2341 for s in sorted(wc.substate):
2351 for s in sorted(wc.substate):
2342 wc.sub(s).bailifchanged()
2352 wc.sub(s).bailifchanged()
2343
2353
2344 elif not overwrite:
2354 elif not overwrite:
2345 if p1 == p2: # no-op update
2355 if p1 == p2: # no-op update
2346 # call the hooks and exit early
2356 # call the hooks and exit early
2347 repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'')
2357 repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'')
2348 repo.hook(b'update', parent1=xp2, parent2=b'', error=0)
2358 repo.hook(b'update', parent1=xp2, parent2=b'', error=0)
2349 return updateresult(0, 0, 0, 0)
2359 return updateresult(0, 0, 0, 0)
2350
2360
2351 if updatecheck == UPDATECHECK_LINEAR and pas not in (
2361 if updatecheck == UPDATECHECK_LINEAR and pas not in (
2352 [p1],
2362 [p1],
2353 [p2],
2363 [p2],
2354 ): # nonlinear
2364 ): # nonlinear
2355 dirty = wc.dirty(missing=True)
2365 dirty = wc.dirty(missing=True)
2356 if dirty:
2366 if dirty:
2357 # Branching is a bit strange to ensure we do the minimal
2367 # Branching is a bit strange to ensure we do the minimal
2358 # amount of call to obsutil.foreground.
2368 # amount of call to obsutil.foreground.
2359 foreground = obsutil.foreground(repo, [p1.node()])
2369 foreground = obsutil.foreground(repo, [p1.node()])
2360 # note: the <node> variable contains a random identifier
2370 # note: the <node> variable contains a random identifier
2361 if repo[node].node() in foreground:
2371 if repo[node].node() in foreground:
2362 pass # allow updating to successors
2372 pass # allow updating to successors
2363 else:
2373 else:
2364 msg = _(b"uncommitted changes")
2374 msg = _(b"uncommitted changes")
2365 hint = _(b"commit or update --clean to discard changes")
2375 hint = _(b"commit or update --clean to discard changes")
2366 raise error.UpdateAbort(msg, hint=hint)
2376 raise error.UpdateAbort(msg, hint=hint)
2367 else:
2377 else:
2368 # Allow jumping branches if clean and specific rev given
2378 # Allow jumping branches if clean and specific rev given
2369 pass
2379 pass
2370
2380
2371 if overwrite:
2381 if overwrite:
2372 pas = [wc]
2382 pas = [wc]
2373 elif not branchmerge:
2383 elif not branchmerge:
2374 pas = [p1]
2384 pas = [p1]
2375
2385
2376 # deprecated config: merge.followcopies
2386 # deprecated config: merge.followcopies
2377 followcopies = repo.ui.configbool(b'merge', b'followcopies')
2387 followcopies = repo.ui.configbool(b'merge', b'followcopies')
2378 if overwrite:
2388 if overwrite:
2379 followcopies = False
2389 followcopies = False
2380 elif not pas[0]:
2390 elif not pas[0]:
2381 followcopies = False
2391 followcopies = False
2382 if not branchmerge and not wc.dirty(missing=True):
2392 if not branchmerge and not wc.dirty(missing=True):
2383 followcopies = False
2393 followcopies = False
2384
2394
2385 ### calculate phase
2395 ### calculate phase
2386 actionbyfile, diverge, renamedelete = calculateupdates(
2396 actionbyfile, diverge, renamedelete = calculateupdates(
2387 repo,
2397 repo,
2388 wc,
2398 wc,
2389 p2,
2399 p2,
2390 pas,
2400 pas,
2391 branchmerge,
2401 branchmerge,
2392 force,
2402 force,
2393 mergeancestor,
2403 mergeancestor,
2394 followcopies,
2404 followcopies,
2395 matcher=matcher,
2405 matcher=matcher,
2396 mergeforce=mergeforce,
2406 mergeforce=mergeforce,
2397 )
2407 )
2398
2408
2399 if updatecheck == UPDATECHECK_NO_CONFLICT:
2409 if updatecheck == UPDATECHECK_NO_CONFLICT:
2400 for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
2410 for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
2401 if m not in (
2411 if m not in (
2402 ACTION_GET,
2412 ACTION_GET,
2403 ACTION_KEEP,
2413 ACTION_KEEP,
2404 ACTION_EXEC,
2414 ACTION_EXEC,
2405 ACTION_REMOVE,
2415 ACTION_REMOVE,
2406 ACTION_PATH_CONFLICT_RESOLVE,
2416 ACTION_PATH_CONFLICT_RESOLVE,
2407 ):
2417 ):
2408 msg = _(b"conflicting changes")
2418 msg = _(b"conflicting changes")
2409 hint = _(b"commit or update --clean to discard changes")
2419 hint = _(b"commit or update --clean to discard changes")
2410 raise error.Abort(msg, hint=hint)
2420 raise error.Abort(msg, hint=hint)
2411
2421
2412 # Prompt and create actions. Most of this is in the resolve phase
2422 # Prompt and create actions. Most of this is in the resolve phase
2413 # already, but we can't handle .hgsubstate in filemerge or
2423 # already, but we can't handle .hgsubstate in filemerge or
2414 # subrepoutil.submerge yet so we have to keep prompting for it.
2424 # subrepoutil.submerge yet so we have to keep prompting for it.
2415 if b'.hgsubstate' in actionbyfile:
2425 if b'.hgsubstate' in actionbyfile:
2416 f = b'.hgsubstate'
2426 f = b'.hgsubstate'
2417 m, args, msg = actionbyfile[f]
2427 m, args, msg = actionbyfile[f]
2418 prompts = filemerge.partextras(labels)
2428 prompts = filemerge.partextras(labels)
2419 prompts[b'f'] = f
2429 prompts[b'f'] = f
2420 if m == ACTION_CHANGED_DELETED:
2430 if m == ACTION_CHANGED_DELETED:
2421 if repo.ui.promptchoice(
2431 if repo.ui.promptchoice(
2422 _(
2432 _(
2423 b"local%(l)s changed %(f)s which other%(o)s deleted\n"
2433 b"local%(l)s changed %(f)s which other%(o)s deleted\n"
2424 b"use (c)hanged version or (d)elete?"
2434 b"use (c)hanged version or (d)elete?"
2425 b"$$ &Changed $$ &Delete"
2435 b"$$ &Changed $$ &Delete"
2426 )
2436 )
2427 % prompts,
2437 % prompts,
2428 0,
2438 0,
2429 ):
2439 ):
2430 actionbyfile[f] = (ACTION_REMOVE, None, b'prompt delete')
2440 actionbyfile[f] = (ACTION_REMOVE, None, b'prompt delete')
2431 elif f in p1:
2441 elif f in p1:
2432 actionbyfile[f] = (
2442 actionbyfile[f] = (
2433 ACTION_ADD_MODIFIED,
2443 ACTION_ADD_MODIFIED,
2434 None,
2444 None,
2435 b'prompt keep',
2445 b'prompt keep',
2436 )
2446 )
2437 else:
2447 else:
2438 actionbyfile[f] = (ACTION_ADD, None, b'prompt keep')
2448 actionbyfile[f] = (ACTION_ADD, None, b'prompt keep')
2439 elif m == ACTION_DELETED_CHANGED:
2449 elif m == ACTION_DELETED_CHANGED:
2440 f1, f2, fa, move, anc = args
2450 f1, f2, fa, move, anc = args
2441 flags = p2[f2].flags()
2451 flags = p2[f2].flags()
2442 if (
2452 if (
2443 repo.ui.promptchoice(
2453 repo.ui.promptchoice(
2444 _(
2454 _(
2445 b"other%(o)s changed %(f)s which local%(l)s deleted\n"
2455 b"other%(o)s changed %(f)s which local%(l)s deleted\n"
2446 b"use (c)hanged version or leave (d)eleted?"
2456 b"use (c)hanged version or leave (d)eleted?"
2447 b"$$ &Changed $$ &Deleted"
2457 b"$$ &Changed $$ &Deleted"
2448 )
2458 )
2449 % prompts,
2459 % prompts,
2450 0,
2460 0,
2451 )
2461 )
2452 == 0
2462 == 0
2453 ):
2463 ):
2454 actionbyfile[f] = (
2464 actionbyfile[f] = (
2455 ACTION_GET,
2465 ACTION_GET,
2456 (flags, False),
2466 (flags, False),
2457 b'prompt recreating',
2467 b'prompt recreating',
2458 )
2468 )
2459 else:
2469 else:
2460 del actionbyfile[f]
2470 del actionbyfile[f]
2461
2471
2462 # Convert to dictionary-of-lists format
2472 # Convert to dictionary-of-lists format
2463 actions = emptyactions()
2473 actions = emptyactions()
2464 for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
2474 for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
2465 if m not in actions:
2475 if m not in actions:
2466 actions[m] = []
2476 actions[m] = []
2467 actions[m].append((f, args, msg))
2477 actions[m].append((f, args, msg))
2468
2478
2469 if not util.fscasesensitive(repo.path):
2479 if not util.fscasesensitive(repo.path):
2470 # check collision between files only in p2 for clean update
2480 # check collision between files only in p2 for clean update
2471 if not branchmerge and (
2481 if not branchmerge and (
2472 force or not wc.dirty(missing=True, branch=False)
2482 force or not wc.dirty(missing=True, branch=False)
2473 ):
2483 ):
2474 _checkcollision(repo, p2.manifest(), None)
2484 _checkcollision(repo, p2.manifest(), None)
2475 else:
2485 else:
2476 _checkcollision(repo, wc.manifest(), actions)
2486 _checkcollision(repo, wc.manifest(), actions)
2477
2487
2478 # divergent renames
2488 # divergent renames
2479 for f, fl in sorted(pycompat.iteritems(diverge)):
2489 for f, fl in sorted(pycompat.iteritems(diverge)):
2480 repo.ui.warn(
2490 repo.ui.warn(
2481 _(
2491 _(
2482 b"note: possible conflict - %s was renamed "
2492 b"note: possible conflict - %s was renamed "
2483 b"multiple times to:\n"
2493 b"multiple times to:\n"
2484 )
2494 )
2485 % f
2495 % f
2486 )
2496 )
2487 for nf in sorted(fl):
2497 for nf in sorted(fl):
2488 repo.ui.warn(b" %s\n" % nf)
2498 repo.ui.warn(b" %s\n" % nf)
2489
2499
2490 # rename and delete
2500 # rename and delete
2491 for f, fl in sorted(pycompat.iteritems(renamedelete)):
2501 for f, fl in sorted(pycompat.iteritems(renamedelete)):
2492 repo.ui.warn(
2502 repo.ui.warn(
2493 _(
2503 _(
2494 b"note: possible conflict - %s was deleted "
2504 b"note: possible conflict - %s was deleted "
2495 b"and renamed to:\n"
2505 b"and renamed to:\n"
2496 )
2506 )
2497 % f
2507 % f
2498 )
2508 )
2499 for nf in sorted(fl):
2509 for nf in sorted(fl):
2500 repo.ui.warn(b" %s\n" % nf)
2510 repo.ui.warn(b" %s\n" % nf)
2501
2511
2502 ### apply phase
2512 ### apply phase
2503 if not branchmerge: # just jump to the new rev
2513 if not branchmerge: # just jump to the new rev
2504 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
2514 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
2505 # If we're doing a partial update, we need to skip updating
2515 # If we're doing a partial update, we need to skip updating
2506 # the dirstate.
2516 # the dirstate.
2507 always = matcher is None or matcher.always()
2517 always = matcher is None or matcher.always()
2508 updatedirstate = always and not wc.isinmemory()
2518 updatedirstate = always and not wc.isinmemory()
2509 if updatedirstate:
2519 if updatedirstate:
2510 repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
2520 repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
2511 # note that we're in the middle of an update
2521 # note that we're in the middle of an update
2512 repo.vfs.write(b'updatestate', p2.hex())
2522 repo.vfs.write(b'updatestate', p2.hex())
2513
2523
2514 # Advertise fsmonitor when its presence could be useful.
2524 # Advertise fsmonitor when its presence could be useful.
2515 #
2525 #
2516 # We only advertise when performing an update from an empty working
2526 # We only advertise when performing an update from an empty working
2517 # directory. This typically only occurs during initial clone.
2527 # directory. This typically only occurs during initial clone.
2518 #
2528 #
2519 # We give users a mechanism to disable the warning in case it is
2529 # We give users a mechanism to disable the warning in case it is
2520 # annoying.
2530 # annoying.
2521 #
2531 #
2522 # We only allow on Linux and MacOS because that's where fsmonitor is
2532 # We only allow on Linux and MacOS because that's where fsmonitor is
2523 # considered stable.
2533 # considered stable.
2524 fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
2534 fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
2525 fsmonitorthreshold = repo.ui.configint(
2535 fsmonitorthreshold = repo.ui.configint(
2526 b'fsmonitor', b'warn_update_file_count'
2536 b'fsmonitor', b'warn_update_file_count'
2527 )
2537 )
2528 try:
2538 try:
2529 # avoid cycle: extensions -> cmdutil -> merge
2539 # avoid cycle: extensions -> cmdutil -> merge
2530 from . import extensions
2540 from . import extensions
2531
2541
2532 extensions.find(b'fsmonitor')
2542 extensions.find(b'fsmonitor')
2533 fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
2543 fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
2534 # We intentionally don't look at whether fsmonitor has disabled
2544 # We intentionally don't look at whether fsmonitor has disabled
2535 # itself because a) fsmonitor may have already printed a warning
2545 # itself because a) fsmonitor may have already printed a warning
2536 # b) we only care about the config state here.
2546 # b) we only care about the config state here.
2537 except KeyError:
2547 except KeyError:
2538 fsmonitorenabled = False
2548 fsmonitorenabled = False
2539
2549
2540 if (
2550 if (
2541 fsmonitorwarning
2551 fsmonitorwarning
2542 and not fsmonitorenabled
2552 and not fsmonitorenabled
2543 and p1.node() == nullid
2553 and p1.node() == nullid
2544 and len(actions[ACTION_GET]) >= fsmonitorthreshold
2554 and len(actions[ACTION_GET]) >= fsmonitorthreshold
2545 and pycompat.sysplatform.startswith((b'linux', b'darwin'))
2555 and pycompat.sysplatform.startswith((b'linux', b'darwin'))
2546 ):
2556 ):
2547 repo.ui.warn(
2557 repo.ui.warn(
2548 _(
2558 _(
2549 b'(warning: large working directory being used without '
2559 b'(warning: large working directory being used without '
2550 b'fsmonitor enabled; enable fsmonitor to improve performance; '
2560 b'fsmonitor enabled; enable fsmonitor to improve performance; '
2551 b'see "hg help -e fsmonitor")\n'
2561 b'see "hg help -e fsmonitor")\n'
2552 )
2562 )
2553 )
2563 )
2554
2564
2555 wantfiledata = updatedirstate and not branchmerge
2565 wantfiledata = updatedirstate and not branchmerge
2556 stats, getfiledata = applyupdates(
2566 stats, getfiledata = applyupdates(
2557 repo, actions, wc, p2, overwrite, wantfiledata, labels=labels
2567 repo, actions, wc, p2, overwrite, wantfiledata, labels=labels
2558 )
2568 )
2559
2569
2560 if updatedirstate:
2570 if updatedirstate:
2561 with repo.dirstate.parentchange():
2571 with repo.dirstate.parentchange():
2562 repo.setparents(fp1, fp2)
2572 repo.setparents(fp1, fp2)
2563 recordupdates(repo, actions, branchmerge, getfiledata)
2573 recordupdates(repo, actions, branchmerge, getfiledata)
2564 # update completed, clear state
2574 # update completed, clear state
2565 util.unlink(repo.vfs.join(b'updatestate'))
2575 util.unlink(repo.vfs.join(b'updatestate'))
2566
2576
2567 if not branchmerge:
2577 if not branchmerge:
2568 repo.dirstate.setbranch(p2.branch())
2578 repo.dirstate.setbranch(p2.branch())
2569
2579
2570 # If we're updating to a location, clean up any stale temporary includes
2580 # If we're updating to a location, clean up any stale temporary includes
2571 # (ex: this happens during hg rebase --abort).
2581 # (ex: this happens during hg rebase --abort).
2572 if not branchmerge:
2582 if not branchmerge:
2573 sparse.prunetemporaryincludes(repo)
2583 sparse.prunetemporaryincludes(repo)
2574
2584
2575 if updatedirstate:
2585 if updatedirstate:
2576 repo.hook(
2586 repo.hook(
2577 b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
2587 b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
2578 )
2588 )
2579 return stats
2589 return stats
2580
2590
2581
2591
2582 def graft(
2592 def graft(
2583 repo, ctx, base, labels=None, keepparent=False, keepconflictparent=False
2593 repo, ctx, base, labels=None, keepparent=False, keepconflictparent=False
2584 ):
2594 ):
2585 """Do a graft-like merge.
2595 """Do a graft-like merge.
2586
2596
2587 This is a merge where the merge ancestor is chosen such that one
2597 This is a merge where the merge ancestor is chosen such that one
2588 or more changesets are grafted onto the current changeset. In
2598 or more changesets are grafted onto the current changeset. In
2589 addition to the merge, this fixes up the dirstate to include only
2599 addition to the merge, this fixes up the dirstate to include only
2590 a single parent (if keepparent is False) and tries to duplicate any
2600 a single parent (if keepparent is False) and tries to duplicate any
2591 renames/copies appropriately.
2601 renames/copies appropriately.
2592
2602
2593 ctx - changeset to rebase
2603 ctx - changeset to rebase
2594 base - merge base, usually ctx.p1()
2604 base - merge base, usually ctx.p1()
2595 labels - merge labels eg ['local', 'graft']
2605 labels - merge labels eg ['local', 'graft']
2596 keepparent - keep second parent if any
2606 keepparent - keep second parent if any
2597 keepconflictparent - if unresolved, keep parent used for the merge
2607 keepconflictparent - if unresolved, keep parent used for the merge
2598
2608
2599 """
2609 """
2600 # If we're grafting a descendant onto an ancestor, be sure to pass
2610 # If we're grafting a descendant onto an ancestor, be sure to pass
2601 # mergeancestor=True to update. This does two things: 1) allows the merge if
2611 # mergeancestor=True to update. This does two things: 1) allows the merge if
2602 # the destination is the same as the parent of the ctx (so we can use graft
2612 # the destination is the same as the parent of the ctx (so we can use graft
2603 # to copy commits), and 2) informs update that the incoming changes are
2613 # to copy commits), and 2) informs update that the incoming changes are
2604 # newer than the destination so it doesn't prompt about "remote changed foo
2614 # newer than the destination so it doesn't prompt about "remote changed foo
2605 # which local deleted".
2615 # which local deleted".
2606 wctx = repo[None]
2616 wctx = repo[None]
2607 pctx = wctx.p1()
2617 pctx = wctx.p1()
2608 mergeancestor = repo.changelog.isancestor(pctx.node(), ctx.node())
2618 mergeancestor = repo.changelog.isancestor(pctx.node(), ctx.node())
2609
2619
2610 stats = update(
2620 stats = update(
2611 repo,
2621 repo,
2612 ctx.node(),
2622 ctx.node(),
2613 True,
2623 True,
2614 True,
2624 True,
2615 base.node(),
2625 base.node(),
2616 mergeancestor=mergeancestor,
2626 mergeancestor=mergeancestor,
2617 labels=labels,
2627 labels=labels,
2618 )
2628 )
2619
2629
2620 if keepconflictparent and stats.unresolvedcount:
2630 if keepconflictparent and stats.unresolvedcount:
2621 pother = ctx.node()
2631 pother = ctx.node()
2622 else:
2632 else:
2623 pother = nullid
2633 pother = nullid
2624 parents = ctx.parents()
2634 parents = ctx.parents()
2625 if keepparent and len(parents) == 2 and base in parents:
2635 if keepparent and len(parents) == 2 and base in parents:
2626 parents.remove(base)
2636 parents.remove(base)
2627 pother = parents[0].node()
2637 pother = parents[0].node()
2628 # Never set both parents equal to each other
2638 # Never set both parents equal to each other
2629 if pother == pctx.node():
2639 if pother == pctx.node():
2630 pother = nullid
2640 pother = nullid
2631
2641
2632 with repo.dirstate.parentchange():
2642 with repo.dirstate.parentchange():
2633 repo.setparents(pctx.node(), pother)
2643 repo.setparents(pctx.node(), pother)
2634 repo.dirstate.write(repo.currenttransaction())
2644 repo.dirstate.write(repo.currenttransaction())
2635 # fix up dirstate for copies and renames
2645 # fix up dirstate for copies and renames
2636 copies.graftcopies(wctx, ctx, base)
2646 copies.graftcopies(wctx, ctx, base)
2637 return stats
2647 return stats
2638
2648
2639
2649
2640 def purge(
2650 def purge(
2641 repo,
2651 repo,
2642 matcher,
2652 matcher,
2643 ignored=False,
2653 ignored=False,
2644 removeemptydirs=True,
2654 removeemptydirs=True,
2645 removefiles=True,
2655 removefiles=True,
2646 abortonerror=False,
2656 abortonerror=False,
2647 noop=False,
2657 noop=False,
2648 ):
2658 ):
2649 """Purge the working directory of untracked files.
2659 """Purge the working directory of untracked files.
2650
2660
2651 ``matcher`` is a matcher configured to scan the working directory -
2661 ``matcher`` is a matcher configured to scan the working directory -
2652 potentially a subset.
2662 potentially a subset.
2653
2663
2654 ``ignored`` controls whether ignored files should also be purged.
2664 ``ignored`` controls whether ignored files should also be purged.
2655
2665
2656 ``removeemptydirs`` controls whether empty directories should be removed.
2666 ``removeemptydirs`` controls whether empty directories should be removed.
2657
2667
2658 ``removefiles`` controls whether files are removed.
2668 ``removefiles`` controls whether files are removed.
2659
2669
2660 ``abortonerror`` causes an exception to be raised if an error occurs
2670 ``abortonerror`` causes an exception to be raised if an error occurs
2661 deleting a file or directory.
2671 deleting a file or directory.
2662
2672
2663 ``noop`` controls whether to actually remove files. If not defined, actions
2673 ``noop`` controls whether to actually remove files. If not defined, actions
2664 will be taken.
2674 will be taken.
2665
2675
2666 Returns an iterable of relative paths in the working directory that were
2676 Returns an iterable of relative paths in the working directory that were
2667 or would be removed.
2677 or would be removed.
2668 """
2678 """
2669
2679
2670 def remove(removefn, path):
2680 def remove(removefn, path):
2671 try:
2681 try:
2672 removefn(path)
2682 removefn(path)
2673 except OSError:
2683 except OSError:
2674 m = _(b'%s cannot be removed') % path
2684 m = _(b'%s cannot be removed') % path
2675 if abortonerror:
2685 if abortonerror:
2676 raise error.Abort(m)
2686 raise error.Abort(m)
2677 else:
2687 else:
2678 repo.ui.warn(_(b'warning: %s\n') % m)
2688 repo.ui.warn(_(b'warning: %s\n') % m)
2679
2689
2680 # There's no API to copy a matcher. So mutate the passed matcher and
2690 # There's no API to copy a matcher. So mutate the passed matcher and
2681 # restore it when we're done.
2691 # restore it when we're done.
2682 oldtraversedir = matcher.traversedir
2692 oldtraversedir = matcher.traversedir
2683
2693
2684 res = []
2694 res = []
2685
2695
2686 try:
2696 try:
2687 if removeemptydirs:
2697 if removeemptydirs:
2688 directories = []
2698 directories = []
2689 matcher.traversedir = directories.append
2699 matcher.traversedir = directories.append
2690
2700
2691 status = repo.status(match=matcher, ignored=ignored, unknown=True)
2701 status = repo.status(match=matcher, ignored=ignored, unknown=True)
2692
2702
2693 if removefiles:
2703 if removefiles:
2694 for f in sorted(status.unknown + status.ignored):
2704 for f in sorted(status.unknown + status.ignored):
2695 if not noop:
2705 if not noop:
2696 repo.ui.note(_(b'removing file %s\n') % f)
2706 repo.ui.note(_(b'removing file %s\n') % f)
2697 remove(repo.wvfs.unlink, f)
2707 remove(repo.wvfs.unlink, f)
2698 res.append(f)
2708 res.append(f)
2699
2709
2700 if removeemptydirs:
2710 if removeemptydirs:
2701 for f in sorted(directories, reverse=True):
2711 for f in sorted(directories, reverse=True):
2702 if matcher(f) and not repo.wvfs.listdir(f):
2712 if matcher(f) and not repo.wvfs.listdir(f):
2703 if not noop:
2713 if not noop:
2704 repo.ui.note(_(b'removing directory %s\n') % f)
2714 repo.ui.note(_(b'removing directory %s\n') % f)
2705 remove(repo.wvfs.rmdir, f)
2715 remove(repo.wvfs.rmdir, f)
2706 res.append(f)
2716 res.append(f)
2707
2717
2708 return res
2718 return res
2709
2719
2710 finally:
2720 finally:
2711 matcher.traversedir = oldtraversedir
2721 matcher.traversedir = oldtraversedir
General Comments 0
You need to be logged in to leave comments. Login now