copies.py
@@ -1,372 +1,379 @@
|
1 | 1 | # copies.py - copy detection for Mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2008 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | import util |
|
9 | 9 | import heapq |
|
10 | 10 | |
|
11 | 11 | def _nonoverlap(d1, d2, d3): |
|
12 | 12 | "Return list of elements in d1 not in d2 or d3" |
|
13 | 13 | return sorted([d for d in d1 if d not in d3 and d not in d2]) |
|
14 | 14 | |
|
15 | 15 | def _dirname(f): |
|
16 | 16 | s = f.rfind("/") |
|
17 | 17 | if s == -1: |
|
18 | 18 | return "" |
|
19 | 19 | return f[:s] |
|
20 | 20 | |
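
For reviewers who want to sanity-check these two helpers outside a repository, a minimal standalone sketch with invented file names (same logic, not the patch's code):

def nonoverlap(d1, d2, d3):
    # names in d1 that appear in neither d2 nor d3
    return sorted(d for d in d1 if d not in d3 and d not in d2)

def dirname(f):
    s = f.rfind("/")
    return "" if s == -1 else f[:s]

print(nonoverlap({'a', 'b/x'}, {'a'}, {'c'}))   # ['b/x']
print(dirname('b/x'))                           # 'b'
print(dirname('toplevel'))                      # ''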
|
21 | 21 | def _findlimit(repo, a, b): |
|
22 | 22 | """Find the earliest revision that's an ancestor of a or b but not both, |
|
23 | 23 | None if no such revision exists. |
|
24 | 24 | """ |
|
25 | 25 | # basic idea: |
|
26 | 26 | # - mark a and b with different sides |
|
27 | 27 | # - if a parent's children are all on the same side, the parent is |
|
28 | 28 | # on that side, otherwise it is on no side |
|
29 | 29 | # - walk the graph in topological order with the help of a heap; |
|
30 | 30 | # - add unseen parents to side map |
|
31 | 31 | # - clear side of any parent that has children on different sides |
|
32 | 32 | # - track number of interesting revs that might still be on a side |
|
33 | 33 | # - track the lowest interesting rev seen |
|
34 | 34 | # - quit when interesting revs is zero |
|
35 | 35 | |
|
36 | 36 | cl = repo.changelog |
|
37 | 37 | working = len(cl) # pseudo rev for the working directory |
|
38 | 38 | if a is None: |
|
39 | 39 | a = working |
|
40 | 40 | if b is None: |
|
41 | 41 | b = working |
|
42 | 42 | |
|
43 | 43 | side = {a: -1, b: 1} |
|
44 | 44 | visit = [-a, -b] |
|
45 | 45 | heapq.heapify(visit) |
|
46 | 46 | interesting = len(visit) |
|
47 | 47 | hascommonancestor = False |
|
48 | 48 | limit = working |
|
49 | 49 | |
|
50 | 50 | while interesting: |
|
51 | 51 | r = -heapq.heappop(visit) |
|
52 | 52 | if r == working: |
|
53 | 53 | parents = [cl.rev(p) for p in repo.dirstate.parents()] |
|
54 | 54 | else: |
|
55 | 55 | parents = cl.parentrevs(r) |
|
56 | 56 | for p in parents: |
|
57 | 57 | if p < 0: |
|
58 | 58 | continue |
|
59 | 59 | if p not in side: |
|
60 | 60 | # first time we see p; add it to visit |
|
61 | 61 | side[p] = side[r] |
|
62 | 62 | if side[p]: |
|
63 | 63 | interesting += 1 |
|
64 | 64 | heapq.heappush(visit, -p) |
|
65 | 65 | elif side[p] and side[p] != side[r]: |
|
66 | 66 | # p was interesting but now we know better |
|
67 | 67 | side[p] = 0 |
|
68 | 68 | interesting -= 1 |
|
69 | 69 | hascommonancestor = True |
|
70 | 70 | if side[r]: |
|
71 | 71 | limit = r # lowest rev visited |
|
72 | 72 | interesting -= 1 |
|
73 | 73 | |
|
74 | 74 | if not hascommonancestor: |
|
75 | 75 | return None |
|
76 | 76 | return limit |
|
77 | 77 | |
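
The comment block above compresses the algorithm quite a bit, so here is a self-contained re-implementation of the same side-marking heap walk on a toy DAG (a plain dict of parent revisions stands in for the changelog and max(a, b) for the working-directory pseudo rev; illustrative only):

import heapq

def findlimit(parents, a, b):
    # parents maps rev -> tuple of parent revs, with -1 meaning "no parent"
    side = {a: -1, b: 1}
    visit = [-a, -b]
    heapq.heapify(visit)
    interesting = len(visit)
    hascommonancestor = False
    limit = max(a, b)
    while interesting:
        r = -heapq.heappop(visit)
        for p in parents[r]:
            if p < 0:
                continue
            if p not in side:
                side[p] = side[r]              # first visit: inherit the side
                if side[p]:
                    interesting += 1
                heapq.heappush(visit, -p)
            elif side[p] and side[p] != side[r]:
                side[p] = 0                    # reachable from both sides
                interesting -= 1
                hascommonancestor = True
        if side[r]:
            limit = r                          # lowest one-sided rev so far
            interesting -= 1
    return limit if hascommonancestor else None

# rev 0 is the root; revs 1 and 2 both branch off it
parents = {0: (-1,), 1: (0,), 2: (0,)}
print(findlimit(parents, 1, 2))    # 1: the earliest rev on only one side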
|
78 | 78 | def _chain(src, dst, a, b): |
|
79 | 79 | '''chain two sets of copies a->b''' |
|
80 | 80 | t = a.copy() |
|
81 | 81 | for k, v in b.iteritems(): |
|
82 | 82 | if v in t: |
|
83 | 83 | # found a chain |
|
84 | 84 | if t[v] != k: |
|
85 | 85 | # file wasn't renamed back to itself |
|
86 | 86 | t[k] = t[v] |
|
87 | 87 | if v not in dst: |
|
88 | 88 | # chain was a rename, not a copy |
|
89 | 89 | del t[v] |
|
90 | 90 | if v in src: |
|
91 | 91 | # file is a copy of an existing file |
|
92 | 92 | t[k] = v |
|
93 | 93 | |
|
94 | 94 | # remove criss-crossed copies |
|
95 | 95 | for k, v in t.items(): |
|
96 | 96 | if k in src and v in dst: |
|
97 | 97 | del t[k] |
|
98 | 98 | |
|
99 | 99 | return t |
|
100 | 100 | |
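
A quick way to see what _chain() computes is to run the same logic on plain dicts; the copy maps are {destination name: source name}, and src/dst are only used for membership tests, so sets stand in for the real contexts here (an illustrative sketch, not the patch's code):

def chain(src, dst, a, b):
    t = dict(a)
    for k, v in b.items():
        if v in t:
            if t[v] != k:            # file wasn't renamed back to itself
                t[k] = t[v]
            if v not in dst:         # intermediate name vanished: a rename
                del t[v]
        if v in src:                 # direct copy of a file still in src
            t[k] = v
    for k, v in list(t.items()):     # drop criss-crossed copies
        if k in src and v in dst:
            del t[k]
    return t

# 'a' was renamed to 'b' in the first step, then 'b' to 'c' in the second:
print(chain({'a'}, {'c'}, {'b': 'a'}, {'c': 'b'}))   # {'c': 'a'}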
|
101 | 101 | def _tracefile(fctx, actx): |
|
102 | 102 | '''return file context that is the ancestor of fctx present in actx''' |
|
103 | 103 | stop = actx.rev() |
|
104 | 104 | am = actx.manifest() |
|
105 | 105 | |
|
106 | 106 | for f in fctx.ancestors(): |
|
107 | 107 | if am.get(f.path(), None) == f.filenode(): |
|
108 | 108 | return f |
|
109 | 109 | if f.rev() < stop: |
|
110 | 110 | return None |
|
111 | 111 | |
|
112 | 112 | def _dirstatecopies(d): |
|
113 | 113 | ds = d._repo.dirstate |
|
114 | 114 | c = ds.copies().copy() |
|
115 | 115 | for k in c.keys(): |
|
116 | 116 | if ds[k] not in 'anm': |
|
117 | 117 | del c[k] |
|
118 | 118 | return c |
|
119 | 119 | |
|
120 | 120 | def _forwardcopies(a, b): |
|
121 | 121 | '''find {dst@b: src@a} copy mapping where a is an ancestor of b''' |
|
122 | 122 | |
|
123 | 123 | # check for working copy |
|
124 | 124 | w = None |
|
125 | 125 | if b.rev() is None: |
|
126 | 126 | w = b |
|
127 | 127 | b = w.p1() |
|
128 | 128 | if a == b: |
|
129 | 129 | # short-circuit to avoid issues with merge states |
|
130 | 130 | return _dirstatecopies(w) |
|
131 | 131 | |
|
132 | 132 | # find where new files came from |
|
133 | 133 | # we currently don't try to find where old files went, too expensive |
|
134 | 134 | # this means we can miss a case like 'hg rm b; hg cp a b' |
|
135 | 135 | cm = {} |
|
136 | 136 | for f in b: |
|
137 | 137 | if f not in a: |
|
138 | 138 | ofctx = _tracefile(b[f], a) |
|
139 | 139 | if ofctx: |
|
140 | 140 | cm[f] = ofctx.path() |
|
141 | 141 | |
|
142 | 142 | # combine copies from dirstate if necessary |
|
143 | 143 | if w is not None: |
|
144 | 144 | cm = _chain(a, w, cm, _dirstatecopies(w)) |
|
145 | 145 | |
|
146 | 146 | return cm |
|
147 | 147 | |
|
148 | 148 | def _backwardcopies(a, b): |
|
149 | 149 | # because the forward mapping is 1:n, we can lose renames here |
|
150 | 150 | # in particular, we find renames better than copies |
|
151 | 151 | f = _forwardcopies(b, a) |
|
152 | 152 | r = {} |
|
153 | 153 | for k, v in f.iteritems(): |
|
154 | 154 | r[v] = k |
|
155 | 155 | return r |
|
156 | 156 | |
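
The comment about losing renames is easy to see with plain dicts: the forward map can send one source to several destinations, and reversing it keeps only one of them (invented names, illustrative only):

forward = {'b': 'a', 'c': 'a'}       # 'a' was copied to both 'b' and 'c'
backward = {}
for k, v in forward.items():
    backward[v] = k                  # later entries overwrite earlier ones
print(backward)                      # {'a': 'c'} or {'a': 'b'}: one copy is lost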
|
157 | 157 | def pathcopies(x, y): |
|
158 | 158 | '''find {dst@y: src@x} copy mapping for directed compare''' |
|
159 | 159 | if x == y or not x or not y: |
|
160 | 160 | return {} |
|
161 | 161 | a = y.ancestor(x) |
|
162 | 162 | if a == x: |
|
163 | 163 | return _forwardcopies(x, y) |
|
164 | 164 | if a == y: |
|
165 | 165 | return _backwardcopies(x, y) |
|
166 | 166 | return _chain(x, y, _backwardcopies(x, a), _forwardcopies(a, y)) |
|
167 | 167 | |
|
168 | 168 | def mergecopies(repo, c1, c2, ca): |
|
169 | 169 | """ |
|
170 | 170 | Find moves and copies between context c1 and c2 that are relevant |
|
171 | 171 | for merging. |
|
172 | 172 | |
|
173 | Returns | |

173 | Returns four dicts: "copy", "movewithdir", "diverge", and | |
|
174 | "renamedelete". | |
|
174 | 175 | |
|
175 | 176 | "copy" is a mapping from destination name -> source name, |
|
176 | 177 | where source is in c1 and destination is in c2 or vice-versa. |
|
177 | 178 | |
|
179 | "movewithdir" is a mapping from source name -> destination name, | |
|
180 | where the file at source present in one context but not the other | |
|
181 | needs to be moved to destination by the merge process, because the | |
|
182 | other context moved the directory it is in. | |
|
183 | ||
|
178 | 184 | "diverge" is a mapping of source name -> list of destination names |
|
179 | 185 | for divergent renames. |
|
180 | 186 | |
|
181 | 187 | "renamedelete" is a mapping of source name -> list of destination |
|
182 | 188 | names for files deleted in c1 that were renamed in c2 or vice-versa. |
|
183 | 189 | """ |
|
184 | 190 | # avoid silly behavior for update from empty dir |
|
185 | 191 | if not c1 or not c2 or c1 == c2: |
|
186 | return {}, {}, {} | |
|
192 | return {}, {}, {}, {} | |
|
187 | 193 | |
|
188 | 194 | # avoid silly behavior for parent -> working dir |
|
189 | 195 | if c2.node() is None and c1.node() == repo.dirstate.p1(): |
|
190 | return repo.dirstate.copies(), {}, {} | |
|
196 | return repo.dirstate.copies(), {}, {}, {} | |
|
191 | 197 | |
|
192 | 198 | limit = _findlimit(repo, c1.rev(), c2.rev()) |
|
193 | 199 | if limit is None: |
|
194 | 200 | # no common ancestor, no copies |
|
195 | return {}, {}, {} | |
|
201 | return {}, {}, {}, {} | |
|
196 | 202 | m1 = c1.manifest() |
|
197 | 203 | m2 = c2.manifest() |
|
198 | 204 | ma = ca.manifest() |
|
199 | 205 | |
|
200 | 206 | def makectx(f, n): |
|
201 | 207 | if len(n) != 20: # in a working context? |
|
202 | 208 | if c1.rev() is None: |
|
203 | 209 | return c1.filectx(f) |
|
204 | 210 | return c2.filectx(f) |
|
205 | 211 | return repo.filectx(f, fileid=n) |
|
206 | 212 | |
|
207 | 213 | ctx = util.lrucachefunc(makectx) |
|
208 | 214 | copy = {} |
|
215 | movewithdir = {} | |
|
209 | 216 | fullcopy = {} |
|
210 | 217 | diverge = {} |
|
211 | 218 | |
|
212 | 219 | def related(f1, f2, limit): |
|
213 | 220 | # Walk back to common ancestor to see if the two files originate |
|
214 | 221 | # from the same file. Since workingfilectx's rev() is None it messes |
|
215 | 222 | # up the integer comparison logic, hence the pre-step check for |
|
216 | 223 | # None (f1 and f2 can only be workingfilectx's initially). |
|
217 | 224 | |
|
218 | 225 | if f1 == f2: |
|
219 | 226 | return f1 # a match |
|
220 | 227 | |
|
221 | 228 | g1, g2 = f1.ancestors(), f2.ancestors() |
|
222 | 229 | try: |
|
223 | 230 | f1r, f2r = f1.rev(), f2.rev() |
|
224 | 231 | |
|
225 | 232 | if f1r is None: |
|
226 | 233 | f1 = g1.next() |
|
227 | 234 | if f2r is None: |
|
228 | 235 | f2 = g2.next() |
|
229 | 236 | |
|
230 | 237 | while True: |
|
231 | 238 | f1r, f2r = f1.rev(), f2.rev() |
|
232 | 239 | if f1r > f2r: |
|
233 | 240 | f1 = g1.next() |
|
234 | 241 | elif f2r > f1r: |
|
235 | 242 | f2 = g2.next() |
|
236 | 243 | elif f1 == f2: |
|
237 | 244 | return f1 # a match |
|
238 | 245 | elif f1r == f2r or f1r < limit or f2r < limit: |
|
239 | 246 | return False # copy no longer relevant |
|
240 | 247 | except StopIteration: |
|
241 | 248 | return False |
|
242 | 249 | |
|
243 | 250 | def checkcopies(f, m1, m2): |
|
244 | 251 | '''check possible copies of f from m1 to m2''' |
|
245 | 252 | of = None |
|
246 | 253 | seen = set([f]) |
|
247 | 254 | for oc in ctx(f, m1[f]).ancestors(): |
|
248 | 255 | ocr = oc.rev() |
|
249 | 256 | of = oc.path() |
|
250 | 257 | if of in seen: |
|
251 | 258 | # check limit late - grab last rename before |
|
252 | 259 | if ocr < limit: |
|
253 | 260 | break |
|
254 | 261 | continue |
|
255 | 262 | seen.add(of) |
|
256 | 263 | |
|
257 | 264 | fullcopy[f] = of # remember for dir rename detection |
|
258 | 265 | if of not in m2: |
|
259 | 266 | continue # no match, keep looking |
|
260 | 267 | if m2[of] == ma.get(of): |
|
261 | 268 | break # no merge needed, quit early |
|
262 | 269 | c2 = ctx(of, m2[of]) |
|
263 | 270 | cr = related(oc, c2, ca.rev()) |
|
264 | 271 | if cr and (of == f or of == c2.path()): # non-divergent |
|
265 | 272 | copy[f] = of |
|
266 | 273 | of = None |
|
267 | 274 | break |
|
268 | 275 | |
|
269 | 276 | if of in ma: |
|
270 | 277 | diverge.setdefault(of, []).append(f) |
|
271 | 278 | |
|
272 | 279 | repo.ui.debug(" searching for copies back to rev %d\n" % limit) |
|
273 | 280 | |
|
274 | 281 | u1 = _nonoverlap(m1, m2, ma) |
|
275 | 282 | u2 = _nonoverlap(m2, m1, ma) |
|
276 | 283 | |
|
277 | 284 | if u1: |
|
278 | 285 | repo.ui.debug(" unmatched files in local:\n %s\n" |
|
279 | 286 | % "\n ".join(u1)) |
|
280 | 287 | if u2: |
|
281 | 288 | repo.ui.debug(" unmatched files in other:\n %s\n" |
|
282 | 289 | % "\n ".join(u2)) |
|
283 | 290 | |
|
284 | 291 | for f in u1: |
|
285 | 292 | checkcopies(f, m1, m2) |
|
286 | 293 | for f in u2: |
|
287 | 294 | checkcopies(f, m2, m1) |
|
288 | 295 | |
|
289 | 296 | renamedelete = {} |
|
290 | 297 | renamedelete2 = set() |
|
291 | 298 | diverge2 = set() |
|
292 | 299 | for of, fl in diverge.items(): |
|
293 | 300 | if len(fl) == 1 or of in c1 or of in c2: |
|
294 | 301 | del diverge[of] # not actually divergent, or not a rename |
|
295 | 302 | if of not in c1 and of not in c2: |
|
296 | 303 | # renamed on one side, deleted on the other side, but filter |
|
297 | 304 | # out files that have been renamed and then deleted |
|
298 | 305 | renamedelete[of] = [f for f in fl if f in c1 or f in c2] |
|
299 | 306 | renamedelete2.update(fl) # reverse map for below |
|
300 | 307 | else: |
|
301 | 308 | diverge2.update(fl) # reverse map for below |
|
302 | 309 | |
|
303 | 310 | if fullcopy: |
|
304 | 311 | repo.ui.debug(" all copies found (* = to merge, ! = divergent, " |
|
305 | 312 | "% = renamed and deleted):\n") |
|
306 | 313 | for f in fullcopy: |
|
307 | 314 | note = "" |
|
308 | 315 | if f in copy: |
|
309 | 316 | note += "*" |
|
310 | 317 | if f in diverge2: |
|
311 | 318 | note += "!" |
|
312 | 319 | if f in renamedelete2: |
|
313 | 320 | note += "%" |
|
314 | 321 | repo.ui.debug(" %s -> %s %s\n" % (f, fullcopy[f], note)) |
|
315 | 322 | del diverge2 |
|
316 | 323 | |
|
317 | 324 | if not fullcopy: |
|
318 | return copy, diverge, renamedelete | |
|
325 | return copy, movewithdir, diverge, renamedelete | |
|
319 | 326 | |
|
320 | 327 | repo.ui.debug(" checking for directory renames\n") |
|
321 | 328 | |
|
322 | 329 | # generate a directory move map |
|
323 | 330 | d1, d2 = c1.dirs(), c2.dirs() |
|
324 | 331 | d1.add('') |
|
325 | 332 | d2.add('') |
|
326 | 333 | invalid = set() |
|
327 | 334 | dirmove = {} |
|
328 | 335 | |
|
329 | 336 | # examine each file copy for a potential directory move, which is |
|
330 | 337 | # when all the files in a directory are moved to a new directory |
|
331 | 338 | for dst, src in fullcopy.iteritems(): |
|
332 | 339 | dsrc, ddst = _dirname(src), _dirname(dst) |
|
333 | 340 | if dsrc in invalid: |
|
334 | 341 | # already seen to be uninteresting |
|
335 | 342 | continue |
|
336 | 343 | elif dsrc in d1 and ddst in d1: |
|
337 | 344 | # directory wasn't entirely moved locally |
|
338 | 345 | invalid.add(dsrc) |
|
339 | 346 | elif dsrc in d2 and ddst in d2: |
|
340 | 347 | # directory wasn't entirely moved remotely |
|
341 | 348 | invalid.add(dsrc) |
|
342 | 349 | elif dsrc in dirmove and dirmove[dsrc] != ddst: |
|
343 | 350 | # files from the same directory moved to two different places |
|
344 | 351 | invalid.add(dsrc) |
|
345 | 352 | else: |
|
346 | 353 | # looks good so far |
|
347 | 354 | dirmove[dsrc + "/"] = ddst + "/" |
|
348 | 355 | |
|
349 | 356 | for i in invalid: |
|
350 | 357 | if i in dirmove: |
|
351 | 358 | del dirmove[i] |
|
352 | 359 | del d1, d2, invalid |
|
353 | 360 | |
|
354 | 361 | if not dirmove: |
|
355 | return copy, diverge, renamedelete | |
|
362 | return copy, movewithdir, diverge, renamedelete | |
|
356 | 363 | |
|
357 | 364 | for d in dirmove: |
|
358 | 365 | repo.ui.debug(" dir %s -> %s\n" % (d, dirmove[d])) |
|
359 | 366 | |
|
360 | 367 | # check unaccounted nonoverlapping files against directory moves |
|
361 | 368 | for f in u1 + u2: |
|
362 | 369 | if f not in fullcopy: |
|
363 | 370 | for d in dirmove: |
|
364 | 371 | if f.startswith(d): |
|
365 | 372 | # new file added in a directory that was moved, move it |
|
366 | 373 | df = dirmove[d] + f[len(d):] |
|
367 | 374 | if df not in copy: |
|
368 | copy[f] = df | |

369 | repo.ui.debug(" file %s -> %s\n" % (f, copy[f])) | |

375 | movewithdir[f] = df | |
|
376 | repo.ui.debug(" file %s -> %s\n" % (f, df)) | |
|
370 | 377 | break |
|
371 | 378 | |
|
372 | return copy, diverge, renamedelete | |
|
379 | return copy, movewithdir, diverge, renamedelete |
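
To make the new movewithdir plumbing concrete, here is a standalone walk-through of the directory-move detection and of the follow-up mapping of newly added files, on invented paths (sets and dicts stand in for the contexts; this mirrors the loops above rather than calling them):

def dirname(f):
    s = f.rfind("/")
    return "" if s == -1 else f[:s]

# destination -> source for every copy/rename found earlier
fullcopy = {'new/x.txt': 'old/x.txt', 'new/y.txt': 'old/y.txt'}
d1 = {'new', ''}                     # directories present locally
d2 = {'old', ''}                     # directories present remotely
invalid, dirmove = set(), {}
for dst, src in fullcopy.items():
    dsrc, ddst = dirname(src), dirname(dst)
    if dsrc in invalid:
        continue
    elif dsrc in d1 and ddst in d1:
        invalid.add(dsrc)            # directory wasn't entirely moved locally
    elif dsrc in d2 and ddst in d2:
        invalid.add(dsrc)            # directory wasn't entirely moved remotely
    elif dsrc in dirmove and dirmove[dsrc] != ddst:
        invalid.add(dsrc)            # moved to two different places
    else:
        dirmove[dsrc + "/"] = ddst + "/"
print(dirmove)                       # {'old/': 'new/'}

# a file that only exists on the side that kept old/ follows the move:
f = 'old/z.txt'
for d in dirmove:
    if f.startswith(d):
        print(f + ' -> ' + dirmove[d] + f[len(d):])   # old/z.txt -> new/z.txt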
merge.py
@@ -1,653 +1,656 @@
|
1 | 1 | # merge.py - directory-level update/merge handling for Mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from node import nullid, nullrev, hex, bin |
|
9 | 9 | from i18n import _ |
|
10 | 10 | import error, scmutil, util, filemerge, copies, subrepo |
|
11 | 11 | import errno, os, shutil |
|
12 | 12 | |
|
13 | 13 | class mergestate(object): |
|
14 | 14 | '''track 3-way merge state of individual files''' |
|
15 | 15 | def __init__(self, repo): |
|
16 | 16 | self._repo = repo |
|
17 | 17 | self._dirty = False |
|
18 | 18 | self._read() |
|
19 | 19 | def reset(self, node=None): |
|
20 | 20 | self._state = {} |
|
21 | 21 | if node: |
|
22 | 22 | self._local = node |
|
23 | 23 | shutil.rmtree(self._repo.join("merge"), True) |
|
24 | 24 | self._dirty = False |
|
25 | 25 | def _read(self): |
|
26 | 26 | self._state = {} |
|
27 | 27 | try: |
|
28 | 28 | f = self._repo.opener("merge/state") |
|
29 | 29 | for i, l in enumerate(f): |
|
30 | 30 | if i == 0: |
|
31 | 31 | self._local = bin(l[:-1]) |
|
32 | 32 | else: |
|
33 | 33 | bits = l[:-1].split("\0") |
|
34 | 34 | self._state[bits[0]] = bits[1:] |
|
35 | 35 | f.close() |
|
36 | 36 | except IOError, err: |
|
37 | 37 | if err.errno != errno.ENOENT: |
|
38 | 38 | raise |
|
39 | 39 | self._dirty = False |
|
40 | 40 | def commit(self): |
|
41 | 41 | if self._dirty: |
|
42 | 42 | f = self._repo.opener("merge/state", "w") |
|
43 | 43 | f.write(hex(self._local) + "\n") |
|
44 | 44 | for d, v in self._state.iteritems(): |
|
45 | 45 | f.write("\0".join([d] + v) + "\n") |
|
46 | 46 | f.close() |
|
47 | 47 | self._dirty = False |
|
48 | 48 | def add(self, fcl, fco, fca, fd, flags): |
|
49 | 49 | hash = util.sha1(fcl.path()).hexdigest() |
|
50 | 50 | self._repo.opener.write("merge/" + hash, fcl.data()) |
|
51 | 51 | self._state[fd] = ['u', hash, fcl.path(), fca.path(), |
|
52 | 52 | hex(fca.filenode()), fco.path(), flags] |
|
53 | 53 | self._dirty = True |
|
54 | 54 | def __contains__(self, dfile): |
|
55 | 55 | return dfile in self._state |
|
56 | 56 | def __getitem__(self, dfile): |
|
57 | 57 | return self._state[dfile][0] |
|
58 | 58 | def __iter__(self): |
|
59 | 59 | l = self._state.keys() |
|
60 | 60 | l.sort() |
|
61 | 61 | for f in l: |
|
62 | 62 | yield f |
|
63 | 63 | def mark(self, dfile, state): |
|
64 | 64 | self._state[dfile][0] = state |
|
65 | 65 | self._dirty = True |
|
66 | 66 | def resolve(self, dfile, wctx, octx): |
|
67 | 67 | if self[dfile] == 'r': |
|
68 | 68 | return 0 |
|
69 | 69 | state, hash, lfile, afile, anode, ofile, flags = self._state[dfile] |
|
70 | 70 | f = self._repo.opener("merge/" + hash) |
|
71 | 71 | self._repo.wwrite(dfile, f.read(), flags) |
|
72 | 72 | f.close() |
|
73 | 73 | fcd = wctx[dfile] |
|
74 | 74 | fco = octx[ofile] |
|
75 | 75 | fca = self._repo.filectx(afile, fileid=anode) |
|
76 | 76 | r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca) |
|
77 | 77 | if r is None: |
|
78 | 78 | # no real conflict |
|
79 | 79 | del self._state[dfile] |
|
80 | 80 | elif not r: |
|
81 | 81 | self.mark(dfile, 'r') |
|
82 | 82 | return r |
|
83 | 83 | |
|
84 | 84 | def _checkunknownfile(repo, wctx, mctx, f): |
|
85 | 85 | return (not repo.dirstate._ignore(f) |
|
86 | 86 | and os.path.isfile(repo.wjoin(f)) |
|
87 | 87 | and repo.dirstate.normalize(f) not in repo.dirstate |
|
88 | 88 | and mctx[f].cmp(wctx[f])) |
|
89 | 89 | |
|
90 | 90 | def _checkunknown(repo, wctx, mctx): |
|
91 | 91 | "check for collisions between unknown files and files in mctx" |
|
92 | 92 | |
|
93 | 93 | error = False |
|
94 | 94 | for f in mctx: |
|
95 | 95 | if f not in wctx and _checkunknownfile(repo, wctx, mctx, f): |
|
96 | 96 | error = True |
|
97 | 97 | wctx._repo.ui.warn(_("%s: untracked file differs\n") % f) |
|
98 | 98 | if error: |
|
99 | 99 | raise util.Abort(_("untracked files in working directory differ " |
|
100 | 100 | "from files in requested revision")) |
|
101 | 101 | |
|
102 | 102 | def _remains(f, m, ma, workingctx=False): |
|
103 | 103 | """check whether specified file remains after merge. |
|
104 | 104 | |
|
105 | 105 | It is assumed that specified file is not contained in the manifest |
|
106 | 106 | of the other context. |
|
107 | 107 | """ |
|
108 | 108 | if f in ma: |
|
109 | 109 | n = m[f] |
|
110 | 110 | if n != ma[f]: |
|
111 | 111 | return True # because it is changed locally |
|
112 | 112 | # even though it doesn't remain, if "remote deleted" is |
|
113 | 113 | # chosen in manifestmerge() |
|
114 | 114 | elif workingctx and n[20:] == "a": |
|
115 | 115 | return True # because it is added locally (linear merge specific) |
|
116 | 116 | else: |
|
117 | 117 | return False # because it is removed remotely |
|
118 | 118 | else: |
|
119 | 119 | return True # because it is added locally |
|
120 | 120 | |
|
121 | 121 | def _checkcollision(mctx, extractxs): |
|
122 | 122 | "check for case folding collisions in the destination context" |
|
123 | 123 | folded = {} |
|
124 | 124 | for fn in mctx: |
|
125 | 125 | fold = util.normcase(fn) |
|
126 | 126 | if fold in folded: |
|
127 | 127 | raise util.Abort(_("case-folding collision between %s and %s") |
|
128 | 128 | % (fn, folded[fold])) |
|
129 | 129 | folded[fold] = fn |
|
130 | 130 | |
|
131 | 131 | if extractxs: |
|
132 | 132 | wctx, actx = extractxs |
|
133 | 133 | # class to delay looking up copy mapping |
|
134 | 134 | class pathcopies(object): |
|
135 | 135 | @util.propertycache |
|
136 | 136 | def map(self): |
|
137 | 137 | # {dst@mctx: src@wctx} copy mapping |
|
138 | 138 | return copies.pathcopies(wctx, mctx) |
|
139 | 139 | pc = pathcopies() |
|
140 | 140 | |
|
141 | 141 | for fn in wctx: |
|
142 | 142 | fold = util.normcase(fn) |
|
143 | 143 | mfn = folded.get(fold, None) |
|
144 | 144 | if (mfn and mfn != fn and pc.map.get(mfn) != fn and |
|
145 | 145 | _remains(fn, wctx.manifest(), actx.manifest(), True) and |
|
146 | 146 | _remains(mfn, mctx.manifest(), actx.manifest())): |
|
147 | 147 | raise util.Abort(_("case-folding collision between %s and %s") |
|
148 | 148 | % (mfn, fn)) |
|
149 | 149 | |
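
The collision test itself is small enough to try in isolation; str.lower() stands in for util.normcase() and the file names are invented (a sketch of the same idea, not the function above):

def checkcollision(manifest):
    folded = {}
    for fn in manifest:
        fold = fn.lower()
        if fold in folded:
            raise Exception("case-folding collision between %s and %s"
                            % (fn, folded[fold]))
        folded[fold] = fn

checkcollision(['README', 'src/main.c'])      # fine, no collision
try:
    checkcollision(['README', 'readme'])
except Exception as e:
    print(e)             # case-folding collision between readme and README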
|
150 | 150 | def _forgetremoved(wctx, mctx, branchmerge): |
|
151 | 151 | """ |
|
152 | 152 | Forget removed files |
|
153 | 153 | |
|
154 | 154 | If we're jumping between revisions (as opposed to merging), and if |
|
155 | 155 | neither the working directory nor the target rev has the file, |
|
156 | 156 | then we need to remove it from the dirstate, to prevent the |
|
157 | 157 | dirstate from listing the file when it is no longer in the |
|
158 | 158 | manifest. |
|
159 | 159 | |
|
160 | 160 | If we're merging, and the other revision has removed a file |
|
161 | 161 | that is not present in the working directory, we need to mark it |
|
162 | 162 | as removed. |
|
163 | 163 | """ |
|
164 | 164 | |
|
165 | 165 | action = [] |
|
166 | 166 | state = branchmerge and 'r' or 'f' |
|
167 | 167 | for f in wctx.deleted(): |
|
168 | 168 | if f not in mctx: |
|
169 | 169 | action.append((f, state)) |
|
170 | 170 | |
|
171 | 171 | if not branchmerge: |
|
172 | 172 | for f in wctx.removed(): |
|
173 | 173 | if f not in mctx: |
|
174 | 174 | action.append((f, "f")) |
|
175 | 175 | |
|
176 | 176 | return action |
|
177 | 177 | |
|
178 | 178 | def manifestmerge(repo, p1, p2, pa, overwrite, partial): |
|
179 | 179 | """ |
|
180 | 180 | Merge p1 and p2 with ancestor pa and generate merge action list |
|
181 | 181 | |
|
182 | 182 | overwrite = whether we clobber working files |
|
183 | 183 | partial = function to filter file lists |
|
184 | 184 | """ |
|
185 | 185 | |
|
186 | 186 | def fmerge(f, f2, fa): |
|
187 | 187 | """merge flags""" |
|
188 | 188 | a, m, n = ma.flags(fa), m1.flags(f), m2.flags(f2) |
|
189 | 189 | if m == n: # flags agree |
|
190 | 190 | return m # unchanged |
|
191 | 191 | if m and n and not a: # flags set, don't agree, differ from parent |
|
192 | 192 | r = repo.ui.promptchoice( |
|
193 | 193 | _(" conflicting flags for %s\n" |
|
194 | 194 | "(n)one, e(x)ec or sym(l)ink?") % f, |
|
195 | 195 | (_("&None"), _("E&xec"), _("Sym&link")), 0) |
|
196 | 196 | if r == 1: |
|
197 | 197 | return "x" # Exec |
|
198 | 198 | if r == 2: |
|
199 | 199 | return "l" # Symlink |
|
200 | 200 | return "" |
|
201 | 201 | if m and m != a: # changed from a to m |
|
202 | 202 | return m |
|
203 | 203 | if n and n != a: # changed from a to n |
|
204 | 204 | if (n == 'l' or a == 'l') and m1.get(f) != ma.get(f): |
|
205 | 205 | # can't automatically merge symlink flag when there |
|
206 | 206 | # are file-level conflicts here, let filemerge take |
|
207 | 207 | # care of it |
|
208 | 208 | return m |
|
209 | 209 | return n |
|
210 | 210 | return '' # flag was cleared |
|
211 | 211 | |
|
212 | 212 | def act(msg, m, f, *args): |
|
213 | 213 | repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m)) |
|
214 | 214 | action.append((f, m) + args) |
|
215 | 215 | |
|
216 | action, copy = [], {} | |
|
216 | action, copy, movewithdir = [], {}, {} | |
|
217 | 217 | |
|
218 | 218 | if overwrite: |
|
219 | 219 | pa = p1 |
|
220 | 220 | elif pa == p2: # backwards |
|
221 | 221 | pa = p1.p1() |
|
222 | 222 | elif pa and repo.ui.configbool("merge", "followcopies", True): |
|
223 | copy, diverge, renamedelete = copies.mergecopies(repo, p1, p2, pa) | |

223 | ret = copies.mergecopies(repo, p1, p2, pa) | |
|
224 | copy, movewithdir, diverge, renamedelete = ret | |
|
224 | 225 | for of, fl in diverge.iteritems(): |
|
225 | 226 | act("divergent renames", "dr", of, fl) |
|
226 | 227 | for of, fl in renamedelete.iteritems(): |
|
227 | 228 | act("rename and delete", "rd", of, fl) |
|
228 | 229 | |
|
229 | 230 | repo.ui.note(_("resolving manifests\n")) |
|
230 | 231 | repo.ui.debug(" overwrite: %s, partial: %s\n" |
|
231 | 232 | % (bool(overwrite), bool(partial))) |
|
232 | 233 | repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, p1, p2)) |
|
233 | 234 | |
|
234 | 235 | m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest() |
|
235 | 236 | copied = set(copy.values()) |
|
237 | copied.update(movewithdir.values()) | |
|
236 | 238 | |
|
237 | 239 | if '.hgsubstate' in m1: |
|
238 | 240 | # check whether sub state is modified |
|
239 | 241 | for s in p1.substate: |
|
240 | 242 | if p1.sub(s).dirty(): |
|
241 | 243 | m1['.hgsubstate'] += "+" |
|
242 | 244 | break |
|
243 | 245 | |
|
244 | 246 | # Compare manifests |
|
245 | 247 | for f, n in m1.iteritems(): |
|
246 | 248 | if partial and not partial(f): |
|
247 | 249 | continue |
|
248 | 250 | if f in m2: |
|
249 | 251 | rflags = fmerge(f, f, f) |
|
250 | 252 | a = ma.get(f, nullid) |
|
251 | 253 | if n == m2[f] or m2[f] == a: # same or local newer |
|
252 | 254 | # is file locally modified or flags need changing? |
|
253 | 255 | # dirstate flags may need to be made current |
|
254 | 256 | if m1.flags(f) != rflags or n[20:]: |
|
255 | 257 | act("update permissions", "e", f, rflags) |
|
256 | 258 | elif n == a: # remote newer |
|
257 | 259 | act("remote is newer", "g", f, rflags) |
|
258 | 260 | else: # both changed |
|
259 | 261 | act("versions differ", "m", f, f, f, rflags, False) |
|
260 | 262 | elif f in copied: # files we'll deal with on m2 side |
|
261 | 263 | pass |
|
262 | elif f in copy: | |
|
264 | elif f in movewithdir: # directory rename | |
|
265 | f2 = movewithdir[f] | |
|
266 | act("remote renamed directory to " + f2, "d", f, None, f2, | |
|
267 | m1.flags(f)) | |
|
268 | elif f in copy: # case 2 A,B/B/B or case 4,21 A/B/B | |
|
263 | 269 | f2 = copy[f] |
|
264 | if f2 not in m2: # directory rename | |
|
265 | act("remote renamed directory to " + f2, "d", | |
|
266 | f, None, f2, m1.flags(f)) | |
|
267 | else: # case 2 A,B/B/B or case 4,21 A/B/B | |
|
268 | act("local copied/moved to " + f2, "m", | |
|
269 | f, f2, f, fmerge(f, f2, f2), False) | |
|
270 | act("local copied/moved to " + f2, "m", f, f2, f, | |
|
271 | fmerge(f, f2, f2), False) | |
|
270 | 272 | elif f in ma: # clean, a different, no remote |
|
271 | 273 | if n != ma[f]: |
|
272 | 274 | if repo.ui.promptchoice( |
|
273 | 275 | _(" local changed %s which remote deleted\n" |
|
274 | 276 | "use (c)hanged version or (d)elete?") % f, |
|
275 | 277 | (_("&Changed"), _("&Delete")), 0): |
|
276 | 278 | act("prompt delete", "r", f) |
|
277 | 279 | else: |
|
278 | 280 | act("prompt keep", "a", f) |
|
279 | 281 | elif n[20:] == "a": # added, no remote |
|
280 | 282 | act("remote deleted", "f", f) |
|
281 | 283 | else: |
|
282 | 284 | act("other deleted", "r", f) |
|
283 | 285 | |
|
284 | 286 | for f, n in m2.iteritems(): |
|
285 | 287 | if partial and not partial(f): |
|
286 | 288 | continue |
|
287 | 289 | if f in m1 or f in copied: # files already visited |
|
288 | 290 | continue |
|
289 | if f in copy: | |

291 | if f in movewithdir: | |
|
292 | f2 = movewithdir[f] | |
|
293 | act("local renamed directory to " + f2, "d", None, f, f2, | |
|
294 | m2.flags(f)) | |
|
295 | elif f in copy: | |
|
290 | 296 | f2 = copy[f] |
|
291 | if f2 not in m2: # directory rename | |

292 | act("local renamed directory to " + f2, "d", | |
|
293 | None, f, f2, m2.flags(f)) | |
|
294 | elif f2 in m2: # rename case 1, A/A,B/A | |
|
297 | if f2 in m2: # rename case 1, A/A,B/A | |
|
295 | 298 | act("remote copied to " + f, "m", |
|
296 | 299 | f2, f, f, fmerge(f2, f, f2), False) |
|
297 | 300 | else: # case 3,20 A/B/A |
|
298 | 301 | act("remote moved to " + f, "m", |
|
299 | 302 | f2, f, f, fmerge(f2, f, f2), True) |
|
300 | 303 | elif f not in ma: |
|
301 | 304 | if (not overwrite |
|
302 | 305 | and _checkunknownfile(repo, p1, p2, f)): |
|
303 | 306 | rflags = fmerge(f, f, f) |
|
304 | 307 | act("remote differs from untracked local", |
|
305 | 308 | "m", f, f, f, rflags, False) |
|
306 | 309 | else: |
|
307 | 310 | act("remote created", "g", f, m2.flags(f)) |
|
308 | 311 | elif n != ma[f]: |
|
309 | 312 | if repo.ui.promptchoice( |
|
310 | 313 | _("remote changed %s which local deleted\n" |
|
311 | 314 | "use (c)hanged version or leave (d)eleted?") % f, |
|
312 | 315 | (_("&Changed"), _("&Deleted")), 0) == 0: |
|
313 | 316 | act("prompt recreating", "g", f, m2.flags(f)) |
|
314 | 317 | |
|
315 | 318 | return action |
|
316 | 319 | |
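
Since the new movewithdir map ultimately turns into more "d" entries here, a sketch of the tuple shapes this function appends may help; the names are invented and the field layout is read off the act() calls above and the unpacking in applyupdates() below:

actions = [
    ('foo.c', 'g', ''),                            # get the remote version
    ('foo.c', 'm', 'foo.c', 'foo.c', '', False),   # three-way merge, no rename
    ('old/a', 'd', None, 'new/a', ''),             # move old/a: remote renamed
                                                   # the directory old/ to new/
    ('old', 'dr', ['new1/old', 'new2/old']),       # divergent renames warning
]
for a in actions:
    print('%s: %s' % (a[1], a[0]))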
|
317 | 320 | def actionkey(a): |
|
318 | 321 | return a[1] == 'r' and -1 or 0, a |
|
319 | 322 | |
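
actionkey() only distinguishes removals from everything else, which sorts them to the front of the list, presumably so paths are freed before other actions write into them. A tiny demonstration with made-up actions:

def actionkey(a):
    return a[1] == 'r' and -1 or 0, a

actions = [('b.txt', 'g', ''),
           ('a.txt', 'r'),
           ('c.txt', 'm', 'c.txt', 'c.txt', '', False)]
actions.sort(key=actionkey)
print([a[1] for a in actions])       # ['r', 'g', 'm']: the remove comes first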
|
320 | 323 | def applyupdates(repo, action, wctx, mctx, actx, overwrite): |
|
321 | 324 | """apply the merge action list to the working directory |
|
322 | 325 | |
|
323 | 326 | wctx is the working copy context |
|
324 | 327 | mctx is the context to be merged into the working copy |
|
325 | 328 | actx is the context of the common ancestor |
|
326 | 329 | |
|
327 | 330 | Return a tuple of counts (updated, merged, removed, unresolved) that |
|
328 | 331 | describes how many files were affected by the update. |
|
329 | 332 | """ |
|
330 | 333 | |
|
331 | 334 | updated, merged, removed, unresolved = 0, 0, 0, 0 |
|
332 | 335 | ms = mergestate(repo) |
|
333 | 336 | ms.reset(wctx.p1().node()) |
|
334 | 337 | moves = [] |
|
335 | 338 | action.sort(key=actionkey) |
|
336 | 339 | |
|
337 | 340 | # prescan for merges |
|
338 | 341 | for a in action: |
|
339 | 342 | f, m = a[:2] |
|
340 | 343 | if m == 'm': # merge |
|
341 | 344 | f2, fd, flags, move = a[2:] |
|
342 | 345 | if f == '.hgsubstate': # merged internally |
|
343 | 346 | continue |
|
344 | 347 | repo.ui.debug("preserving %s for resolve of %s\n" % (f, fd)) |
|
345 | 348 | fcl = wctx[f] |
|
346 | 349 | fco = mctx[f2] |
|
347 | 350 | if mctx == actx: # backwards, use working dir parent as ancestor |
|
348 | 351 | if fcl.parents(): |
|
349 | 352 | fca = fcl.p1() |
|
350 | 353 | else: |
|
351 | 354 | fca = repo.filectx(f, fileid=nullrev) |
|
352 | 355 | else: |
|
353 | 356 | fca = fcl.ancestor(fco, actx) |
|
354 | 357 | if not fca: |
|
355 | 358 | fca = repo.filectx(f, fileid=nullrev) |
|
356 | 359 | ms.add(fcl, fco, fca, fd, flags) |
|
357 | 360 | if f != fd and move: |
|
358 | 361 | moves.append(f) |
|
359 | 362 | |
|
360 | 363 | audit = scmutil.pathauditor(repo.root) |
|
361 | 364 | |
|
362 | 365 | # remove renamed files after safely stored |
|
363 | 366 | for f in moves: |
|
364 | 367 | if os.path.lexists(repo.wjoin(f)): |
|
365 | 368 | repo.ui.debug("removing %s\n" % f) |
|
366 | 369 | audit(f) |
|
367 | 370 | os.unlink(repo.wjoin(f)) |
|
368 | 371 | |
|
369 | 372 | numupdates = len(action) |
|
370 | 373 | for i, a in enumerate(action): |
|
371 | 374 | f, m = a[:2] |
|
372 | 375 | repo.ui.progress(_('updating'), i + 1, item=f, total=numupdates, |
|
373 | 376 | unit=_('files')) |
|
374 | 377 | if f and f[0] == "/": |
|
375 | 378 | continue |
|
376 | 379 | if m == "r": # remove |
|
377 | 380 | repo.ui.note(_("removing %s\n") % f) |
|
378 | 381 | audit(f) |
|
379 | 382 | if f == '.hgsubstate': # subrepo states need updating |
|
380 | 383 | subrepo.submerge(repo, wctx, mctx, wctx, overwrite) |
|
381 | 384 | try: |
|
382 | 385 | util.unlinkpath(repo.wjoin(f)) |
|
383 | 386 | except OSError, inst: |
|
384 | 387 | if inst.errno != errno.ENOENT: |
|
385 | 388 | repo.ui.warn(_("update failed to remove %s: %s!\n") % |
|
386 | 389 | (f, inst.strerror)) |
|
387 | 390 | removed += 1 |
|
388 | 391 | elif m == "m": # merge |
|
389 | 392 | if f == '.hgsubstate': # subrepo states need updating |
|
390 | 393 | subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx), |
|
391 | 394 | overwrite) |
|
392 | 395 | continue |
|
393 | 396 | f2, fd, flags, move = a[2:] |
|
394 | 397 | repo.wopener.audit(fd) |
|
395 | 398 | r = ms.resolve(fd, wctx, mctx) |
|
396 | 399 | if r is not None and r > 0: |
|
397 | 400 | unresolved += 1 |
|
398 | 401 | else: |
|
399 | 402 | if r is None: |
|
400 | 403 | updated += 1 |
|
401 | 404 | else: |
|
402 | 405 | merged += 1 |
|
403 | 406 | if (move and repo.dirstate.normalize(fd) != f |
|
404 | 407 | and os.path.lexists(repo.wjoin(f))): |
|
405 | 408 | repo.ui.debug("removing %s\n" % f) |
|
406 | 409 | audit(f) |
|
407 | 410 | os.unlink(repo.wjoin(f)) |
|
408 | 411 | elif m == "g": # get |
|
409 | 412 | flags = a[2] |
|
410 | 413 | repo.ui.note(_("getting %s\n") % f) |
|
411 | 414 | t = mctx.filectx(f).data() |
|
412 | 415 | repo.wwrite(f, t, flags) |
|
413 | 416 | t = None |
|
414 | 417 | updated += 1 |
|
415 | 418 | if f == '.hgsubstate': # subrepo states need updating |
|
416 | 419 | subrepo.submerge(repo, wctx, mctx, wctx, overwrite) |
|
417 | 420 | elif m == "d": # directory rename |
|
418 | 421 | f2, fd, flags = a[2:] |
|
419 | 422 | if f: |
|
420 | 423 | repo.ui.note(_("moving %s to %s\n") % (f, fd)) |
|
421 | 424 | audit(f) |
|
422 | 425 | t = wctx.filectx(f).data() |
|
423 | 426 | repo.wwrite(fd, t, flags) |
|
424 | 427 | util.unlinkpath(repo.wjoin(f)) |
|
425 | 428 | if f2: |
|
426 | 429 | repo.ui.note(_("getting %s to %s\n") % (f2, fd)) |
|
427 | 430 | t = mctx.filectx(f2).data() |
|
428 | 431 | repo.wwrite(fd, t, flags) |
|
429 | 432 | updated += 1 |
|
430 | 433 | elif m == "dr": # divergent renames |
|
431 | 434 | fl = a[2] |
|
432 | 435 | repo.ui.warn(_("note: possible conflict - %s was renamed " |
|
433 | 436 | "multiple times to:\n") % f) |
|
434 | 437 | for nf in fl: |
|
435 | 438 | repo.ui.warn(" %s\n" % nf) |
|
436 | 439 | elif m == "rd": # rename and delete |
|
437 | 440 | fl = a[2] |
|
438 | 441 | repo.ui.warn(_("note: possible conflict - %s was deleted " |
|
439 | 442 | "and renamed to:\n") % f) |
|
440 | 443 | for nf in fl: |
|
441 | 444 | repo.ui.warn(" %s\n" % nf) |
|
442 | 445 | elif m == "e": # exec |
|
443 | 446 | flags = a[2] |
|
444 | 447 | repo.wopener.audit(f) |
|
445 | 448 | util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags) |
|
446 | 449 | ms.commit() |
|
447 | 450 | repo.ui.progress(_('updating'), None, total=numupdates, unit=_('files')) |
|
448 | 451 | |
|
449 | 452 | return updated, merged, removed, unresolved |
|
450 | 453 | |
|
451 | 454 | def calculateupdates(repo, tctx, mctx, ancestor, branchmerge, force, partial): |
|
452 | 455 | "Calculate the actions needed to merge mctx into tctx" |
|
453 | 456 | action = [] |
|
454 | 457 | folding = not util.checkcase(repo.path) |
|
455 | 458 | if folding: |
|
456 | 459 | # collision check is not needed for clean update |
|
457 | 460 | if (not branchmerge and |
|
458 | 461 | (force or not tctx.dirty(missing=True, branch=False))): |
|
459 | 462 | _checkcollision(mctx, None) |
|
460 | 463 | else: |
|
461 | 464 | _checkcollision(mctx, (tctx, ancestor)) |
|
462 | 465 | if not force: |
|
463 | 466 | _checkunknown(repo, tctx, mctx) |
|
464 | 467 | if tctx.rev() is None: |
|
465 | 468 | action += _forgetremoved(tctx, mctx, branchmerge) |
|
466 | 469 | action += manifestmerge(repo, tctx, mctx, |
|
467 | 470 | ancestor, |
|
468 | 471 | force and not branchmerge, |
|
469 | 472 | partial) |
|
470 | 473 | return action |
|
471 | 474 | |
|
472 | 475 | def recordupdates(repo, action, branchmerge): |
|
473 | 476 | "record merge actions to the dirstate" |
|
474 | 477 | |
|
475 | 478 | for a in action: |
|
476 | 479 | f, m = a[:2] |
|
477 | 480 | if m == "r": # remove |
|
478 | 481 | if branchmerge: |
|
479 | 482 | repo.dirstate.remove(f) |
|
480 | 483 | else: |
|
481 | 484 | repo.dirstate.drop(f) |
|
482 | 485 | elif m == "a": # re-add |
|
483 | 486 | if not branchmerge: |
|
484 | 487 | repo.dirstate.add(f) |
|
485 | 488 | elif m == "f": # forget |
|
486 | 489 | repo.dirstate.drop(f) |
|
487 | 490 | elif m == "e": # exec change |
|
488 | 491 | repo.dirstate.normallookup(f) |
|
489 | 492 | elif m == "g": # get |
|
490 | 493 | if branchmerge: |
|
491 | 494 | repo.dirstate.otherparent(f) |
|
492 | 495 | else: |
|
493 | 496 | repo.dirstate.normal(f) |
|
494 | 497 | elif m == "m": # merge |
|
495 | 498 | f2, fd, flag, move = a[2:] |
|
496 | 499 | if branchmerge: |
|
497 | 500 | # We've done a branch merge, mark this file as merged |
|
498 | 501 | # so that we properly record the merger later |
|
499 | 502 | repo.dirstate.merge(fd) |
|
500 | 503 | if f != f2: # copy/rename |
|
501 | 504 | if move: |
|
502 | 505 | repo.dirstate.remove(f) |
|
503 | 506 | if f != fd: |
|
504 | 507 | repo.dirstate.copy(f, fd) |
|
505 | 508 | else: |
|
506 | 509 | repo.dirstate.copy(f2, fd) |
|
507 | 510 | else: |
|
508 | 511 | # We've update-merged a locally modified file, so |
|
509 | 512 | # we set the dirstate to emulate a normal checkout |
|
510 | 513 | # of that file some time in the past. Thus our |
|
511 | 514 | # merge will appear as a normal local file |
|
512 | 515 | # modification. |
|
513 | 516 | if f2 == fd: # file not locally copied/moved |
|
514 | 517 | repo.dirstate.normallookup(fd) |
|
515 | 518 | if move: |
|
516 | 519 | repo.dirstate.drop(f) |
|
517 | 520 | elif m == "d": # directory rename |
|
518 | 521 | f2, fd, flag = a[2:] |
|
519 | 522 | if not f2 and f not in repo.dirstate: |
|
520 | 523 | # untracked file moved |
|
521 | 524 | continue |
|
522 | 525 | if branchmerge: |
|
523 | 526 | repo.dirstate.add(fd) |
|
524 | 527 | if f: |
|
525 | 528 | repo.dirstate.remove(f) |
|
526 | 529 | repo.dirstate.copy(f, fd) |
|
527 | 530 | if f2: |
|
528 | 531 | repo.dirstate.copy(f2, fd) |
|
529 | 532 | else: |
|
530 | 533 | repo.dirstate.normal(fd) |
|
531 | 534 | if f: |
|
532 | 535 | repo.dirstate.drop(f) |
|
533 | 536 | |
|
534 | 537 | def update(repo, node, branchmerge, force, partial, ancestor=None, |
|
535 | 538 | mergeancestor=False): |
|
536 | 539 | """ |
|
537 | 540 | Perform a merge between the working directory and the given node |
|
538 | 541 | |
|
539 | 542 | node = the node to update to, or None if unspecified |
|
540 | 543 | branchmerge = whether to merge between branches |
|
541 | 544 | force = whether to force branch merging or file overwriting |
|
542 | 545 | partial = a function to filter file lists (dirstate not updated) |
|
543 | 546 | mergeancestor = if false, merging with an ancestor (fast-forward) |
|
544 | 547 | is only allowed between different named branches. This flag |
|
545 | 548 | is used by rebase extension as a temporary fix and should be |
|
546 | 549 | avoided in general. |
|
547 | 550 | |
|
548 | 551 | The table below shows all the behaviors of the update command |
|
549 | 552 | given the -c and -C or no options, whether the working directory |
|
550 | 553 | is dirty, whether a revision is specified, and the relationship of |
|
551 | 554 | the parent rev to the target rev (linear, on the same named |
|
552 | 555 | branch, or on another named branch). |
|
553 | 556 | |
|
554 | 557 | This logic is tested by test-update-branches.t. |
|
555 | 558 | |
|
556 | 559 | -c -C dirty rev | linear same cross |
|
557 | 560 | n n n n | ok (1) x |
|
558 | 561 | n n n y | ok ok ok |
|
559 | 562 | n n y * | merge (2) (2) |
|
560 | 563 | n y * * | --- discard --- |
|
561 | 564 | y n y * | --- (3) --- |
|
562 | 565 | y n n * | --- ok --- |
|
563 | 566 | y y * * | --- (4) --- |
|
564 | 567 | |
|
565 | 568 | x = can't happen |
|
566 | 569 | * = don't-care |
|
567 | 570 | 1 = abort: crosses branches (use 'hg merge' or 'hg update -c') |
|
568 | 571 | 2 = abort: crosses branches (use 'hg merge' to merge or |
|
569 | 572 | use 'hg update -C' to discard changes) |
|
570 | 573 | 3 = abort: uncommitted local changes |
|
571 | 574 | 4 = incompatible options (checked in commands.py) |
|
572 | 575 | |
|
573 | 576 | Return the same tuple as applyupdates(). |
|
574 | 577 | """ |
|
575 | 578 | |
|
576 | 579 | onode = node |
|
577 | 580 | wlock = repo.wlock() |
|
578 | 581 | try: |
|
579 | 582 | wc = repo[None] |
|
580 | 583 | if node is None: |
|
581 | 584 | # tip of current branch |
|
582 | 585 | try: |
|
583 | 586 | node = repo.branchtip(wc.branch()) |
|
584 | 587 | except error.RepoLookupError: |
|
585 | 588 | if wc.branch() == "default": # no default branch! |
|
586 | 589 | node = repo.lookup("tip") # update to tip |
|
587 | 590 | else: |
|
588 | 591 | raise util.Abort(_("branch %s not found") % wc.branch()) |
|
589 | 592 | overwrite = force and not branchmerge |
|
590 | 593 | pl = wc.parents() |
|
591 | 594 | p1, p2 = pl[0], repo[node] |
|
592 | 595 | if ancestor: |
|
593 | 596 | pa = repo[ancestor] |
|
594 | 597 | else: |
|
595 | 598 | pa = p1.ancestor(p2) |
|
596 | 599 | |
|
597 | 600 | fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2) |
|
598 | 601 | |
|
599 | 602 | ### check phase |
|
600 | 603 | if not overwrite and len(pl) > 1: |
|
601 | 604 | raise util.Abort(_("outstanding uncommitted merges")) |
|
602 | 605 | if branchmerge: |
|
603 | 606 | if pa == p2: |
|
604 | 607 | raise util.Abort(_("merging with a working directory ancestor" |
|
605 | 608 | " has no effect")) |
|
606 | 609 | elif pa == p1: |
|
607 | 610 | if not mergeancestor and p1.branch() == p2.branch(): |
|
608 | 611 | raise util.Abort(_("nothing to merge"), |
|
609 | 612 | hint=_("use 'hg update' " |
|
610 | 613 | "or check 'hg heads'")) |
|
611 | 614 | if not force and (wc.files() or wc.deleted()): |
|
612 | 615 | raise util.Abort(_("outstanding uncommitted changes"), |
|
613 | 616 | hint=_("use 'hg status' to list changes")) |
|
614 | 617 | for s in wc.substate: |
|
615 | 618 | if wc.sub(s).dirty(): |
|
616 | 619 | raise util.Abort(_("outstanding uncommitted changes in " |
|
617 | 620 | "subrepository '%s'") % s) |
|
618 | 621 | |
|
619 | 622 | elif not overwrite: |
|
620 | 623 | if pa == p1 or pa == p2: # linear |
|
621 | 624 | pass # all good |
|
622 | 625 | elif wc.dirty(missing=True): |
|
623 | 626 | raise util.Abort(_("crosses branches (merge branches or use" |
|
624 | 627 | " --clean to discard changes)")) |
|
625 | 628 | elif onode is None: |
|
626 | 629 | raise util.Abort(_("crosses branches (merge branches or update" |
|
627 | 630 | " --check to force update)")) |
|
628 | 631 | else: |
|
629 | 632 | # Allow jumping branches if clean and specific rev given |
|
630 | 633 | pa = p1 |
|
631 | 634 | |
|
632 | 635 | ### calculate phase |
|
633 | 636 | action = calculateupdates(repo, wc, p2, pa, branchmerge, force, partial) |
|
634 | 637 | |
|
635 | 638 | ### apply phase |
|
636 | 639 | if not branchmerge: # just jump to the new rev |
|
637 | 640 | fp1, fp2, xp1, xp2 = fp2, nullid, xp2, '' |
|
638 | 641 | if not partial: |
|
639 | 642 | repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2) |
|
640 | 643 | |
|
641 | 644 | stats = applyupdates(repo, action, wc, p2, pa, overwrite) |
|
642 | 645 | |
|
643 | 646 | if not partial: |
|
644 | 647 | repo.setparents(fp1, fp2) |
|
645 | 648 | recordupdates(repo, action, branchmerge) |
|
646 | 649 | if not branchmerge: |
|
647 | 650 | repo.dirstate.setbranch(p2.branch()) |
|
648 | 651 | finally: |
|
649 | 652 | wlock.release() |
|
650 | 653 | |
|
651 | 654 | if not partial: |
|
652 | 655 | repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3]) |
|
653 | 656 | return stats |