merge with upstream
Benoit Boissinot
r3320:fa59d676 merge default
@@ -1,1768 +1,1782 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 import repo
12 12 demandload(globals(), "appendfile changegroup")
13 13 demandload(globals(), "changelog dirstate filelog manifest context")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "os revlog time util")
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = ()
19 19
20 20 def __del__(self):
21 21 self.transhandle = None
22 22 def __init__(self, parentui, path=None, create=0):
23 23 repo.repository.__init__(self)
24 24 if not path:
25 25 p = os.getcwd()
26 26 while not os.path.isdir(os.path.join(p, ".hg")):
27 27 oldp = p
28 28 p = os.path.dirname(p)
29 29 if p == oldp:
30 30 raise repo.RepoError(_("There is no Mercurial repository"
31 31 " here (.hg not found)"))
32 32 path = p
33 33 self.path = os.path.join(path, ".hg")
34 34
35 35 if not os.path.isdir(self.path):
36 36 if create:
37 37 if not os.path.exists(path):
38 38 os.mkdir(path)
39 39 os.mkdir(self.path)
40 40 os.mkdir(self.join("data"))
41 41 else:
42 42 raise repo.RepoError(_("repository %s not found") % path)
43 43 elif create:
44 44 raise repo.RepoError(_("repository %s already exists") % path)
45 45
46 46 self.root = os.path.abspath(path)
47 47 self.origroot = path
48 48 self.ui = ui.ui(parentui=parentui)
49 49 self.opener = util.opener(self.path)
50 50 self.wopener = util.opener(self.root)
51 51
52 52 try:
53 53 self.ui.readconfig(self.join("hgrc"), self.root)
54 54 except IOError:
55 55 pass
56 56
57 57 v = self.ui.revlogopts
58 58 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
59 59 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
60 60 fl = v.get('flags', None)
61 61 flags = 0
62 62 if fl != None:
63 63 for x in fl.split():
64 64 flags |= revlog.flagstr(x)
65 65 elif self.revlogv1:
66 66 flags = revlog.REVLOG_DEFAULT_FLAGS
67 67
68 68 v = self.revlogversion | flags
69 69 self.manifest = manifest.manifest(self.opener, v)
70 70 self.changelog = changelog.changelog(self.opener, v)
71 71
72 72 # the changelog might not have the inline index flag
73 73 # on. If the format of the changelog is the same as found in
74 74 # .hgrc, apply any flags found in the .hgrc as well.
75 75 # Otherwise, just use the version from the changelog.
76 76 v = self.changelog.version
77 77 if v == self.revlogversion:
78 78 v |= flags
79 79 self.revlogversion = v
80 80
81 81 self.tagscache = None
82 82 self.nodetagscache = None
83 83 self.encodepats = None
84 84 self.decodepats = None
85 85 self.transhandle = None
86 86
87 87 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
88 88
89 89 def url(self):
90 90 return 'file:' + self.root
91 91
92 92 def hook(self, name, throw=False, **args):
93 93 def callhook(hname, funcname):
94 94 '''call python hook. hook is callable object, looked up as
95 95 name in python module. if callable returns "true", hook
96 96 fails, else passes. if hook raises exception, treated as
97 97 hook failure. exception propagates if throw is "true".
98 98
99 99 reason for "true" meaning "hook failed" is so that
100 100 unmodified commands (e.g. mercurial.commands.update) can
101 101 be run as hooks without wrappers to convert return values.'''
102 102
103 103 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
104 104 d = funcname.rfind('.')
105 105 if d == -1:
106 106 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
107 107 % (hname, funcname))
108 108 modname = funcname[:d]
109 109 try:
110 110 obj = __import__(modname)
111 111 except ImportError:
112 112 try:
113 113 # extensions are loaded with hgext_ prefix
114 114 obj = __import__("hgext_%s" % modname)
115 115 except ImportError:
116 116 raise util.Abort(_('%s hook is invalid '
117 117 '(import of "%s" failed)') %
118 118 (hname, modname))
119 119 try:
120 120 for p in funcname.split('.')[1:]:
121 121 obj = getattr(obj, p)
122 122 except AttributeError, err:
123 123 raise util.Abort(_('%s hook is invalid '
124 124 '("%s" is not defined)') %
125 125 (hname, funcname))
126 126 if not callable(obj):
127 127 raise util.Abort(_('%s hook is invalid '
128 128 '("%s" is not callable)') %
129 129 (hname, funcname))
130 130 try:
131 131 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
132 132 except (KeyboardInterrupt, util.SignalInterrupt):
133 133 raise
134 134 except Exception, exc:
135 135 if isinstance(exc, util.Abort):
136 136 self.ui.warn(_('error: %s hook failed: %s\n') %
137 137 (hname, exc.args[0]))
138 138 else:
139 139 self.ui.warn(_('error: %s hook raised an exception: '
140 140 '%s\n') % (hname, exc))
141 141 if throw:
142 142 raise
143 143 self.ui.print_exc()
144 144 return True
145 145 if r:
146 146 if throw:
147 147 raise util.Abort(_('%s hook failed') % hname)
148 148 self.ui.warn(_('warning: %s hook failed\n') % hname)
149 149 return r
150 150
151 151 def runhook(name, cmd):
152 152 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
153 153 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
154 154 r = util.system(cmd, environ=env, cwd=self.root)
155 155 if r:
156 156 desc, r = util.explain_exit(r)
157 157 if throw:
158 158 raise util.Abort(_('%s hook %s') % (name, desc))
159 159 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
160 160 return r
161 161
162 162 r = False
163 163 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
164 164 if hname.split(".", 1)[0] == name and cmd]
165 165 hooks.sort()
166 166 for hname, cmd in hooks:
167 167 if cmd.startswith('python:'):
168 168 r = callhook(hname, cmd[7:].strip()) or r
169 169 else:
170 170 r = runhook(hname, cmd) or r
171 171 return r
172 172
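As a hedged illustration of the python hook convention documented above (a true return value means the hook failed, and tag() runs 'pretag' with throw=True), a minimal hook might look like the sketch below. The module name myhooks and the function are hypothetical; only the [hooks] section, the python: prefix, and the pretag keyword arguments come from the code itself.

# myhooks.py -- hypothetical module somewhere on the Python path
def forbid_spaces(ui, repo, hooktype, node=None, tag=None, local=None, **kwargs):
    """Refuse tag names that contain a space.

    Returning a true value marks the hook as failed; because tag() calls
    self.hook('pretag', throw=True, ...), failure aborts the tagging.
    """
    if tag and ' ' in tag:
        ui.warn('tag name %r contains a space\n' % tag)
        return True
    return False

# enabled from hgrc (hypothetical entry):
#   [hooks]
#   pretag.nospaces = python:myhooks.forbid_spaces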
173 173 tag_disallowed = ':\r\n'
174 174
175 175 def tag(self, name, node, message, local, user, date):
176 176 '''tag a revision with a symbolic name.
177 177
178 178 if local is True, the tag is stored in a per-repository file.
179 179 otherwise, it is stored in the .hgtags file, and a new
180 180 changeset is committed with the change.
181 181
182 182 keyword arguments:
183 183
184 184 local: whether to store tag in non-version-controlled file
185 185 (default False)
186 186
187 187 message: commit message to use if committing
188 188
189 189 user: name of user to use if committing
190 190
191 191 date: date tuple to use if committing'''
192 192
193 193 for c in self.tag_disallowed:
194 194 if c in name:
195 195 raise util.Abort(_('%r cannot be used in a tag name') % c)
196 196
197 197 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
198 198
199 199 if local:
200 200 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
201 201 self.hook('tag', node=hex(node), tag=name, local=local)
202 202 return
203 203
204 204 for x in self.status()[:5]:
205 205 if '.hgtags' in x:
206 206 raise util.Abort(_('working copy of .hgtags is changed '
207 207 '(please commit .hgtags manually)'))
208 208
209 209 self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
210 210 if self.dirstate.state('.hgtags') == '?':
211 211 self.add(['.hgtags'])
212 212
213 213 self.commit(['.hgtags'], message, user, date)
214 214 self.hook('tag', node=hex(node), tag=name, local=local)
215 215
216 216 def tags(self):
217 217 '''return a mapping of tag to node'''
218 218 if not self.tagscache:
219 219 self.tagscache = {}
220 220
221 221 def parsetag(line, context):
222 222 if not line:
223 223 return
224 224 s = l.split(" ", 1)
225 225 if len(s) != 2:
226 226 self.ui.warn(_("%s: cannot parse entry\n") % context)
227 227 return
228 228 node, key = s
229 229 key = key.strip()
230 230 try:
231 231 bin_n = bin(node)
232 232 except TypeError:
233 233 self.ui.warn(_("%s: node '%s' is not well formed\n") %
234 234 (context, node))
235 235 return
236 236 if bin_n not in self.changelog.nodemap:
237 237 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
238 238 (context, key))
239 239 return
240 240 self.tagscache[key] = bin_n
241 241
242 242 # read the tags file from each head, ending with the tip,
243 243 # and add each tag found to the map, with "newer" ones
244 244 # taking precedence
245 245 heads = self.heads()
246 246 heads.reverse()
247 247 fl = self.file(".hgtags")
248 248 for node in heads:
249 249 change = self.changelog.read(node)
250 250 rev = self.changelog.rev(node)
251 251 fn, ff = self.manifest.find(change[0], '.hgtags')
252 252 if fn is None: continue
253 253 count = 0
254 254 for l in fl.read(fn).splitlines():
255 255 count += 1
256 256 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
257 257 (rev, short(node), count))
258 258 try:
259 259 f = self.opener("localtags")
260 260 count = 0
261 261 for l in f:
262 262 count += 1
263 263 parsetag(l, _("localtags, line %d") % count)
264 264 except IOError:
265 265 pass
266 266
267 267 self.tagscache['tip'] = self.changelog.tip()
268 268
269 269 return self.tagscache
270 270
271 271 def tagslist(self):
272 272 '''return a list of tags ordered by revision'''
273 273 l = []
274 274 for t, n in self.tags().items():
275 275 try:
276 276 r = self.changelog.rev(n)
277 277 except:
278 278 r = -2 # sort to the beginning of the list if unknown
279 279 l.append((r, t, n))
280 280 l.sort()
281 281 return [(t, n) for r, t, n in l]
282 282
283 283 def nodetags(self, node):
284 284 '''return the tags associated with a node'''
285 285 if not self.nodetagscache:
286 286 self.nodetagscache = {}
287 287 for t, n in self.tags().items():
288 288 self.nodetagscache.setdefault(n, []).append(t)
289 289 return self.nodetagscache.get(node, [])
290 290
291 291 def lookup(self, key):
292 292 try:
293 293 return self.tags()[key]
294 294 except KeyError:
295 295 if key == '.':
296 296 key = self.dirstate.parents()[0]
297 297 if key == nullid:
298 298 raise repo.RepoError(_("no revision checked out"))
299 299 try:
300 300 return self.changelog.lookup(key)
301 301 except:
302 302 raise repo.RepoError(_("unknown revision '%s'") % key)
303 303
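A hedged usage sketch of the tag lookups above; the repository path is hypothetical, and hg.repository()/ui.ui() are assumed to be the usual entry points for opening a local repository.

from mercurial import hg, ui
from mercurial.node import short

repo = hg.repository(ui.ui(), '/path/to/repo')  # hypothetical local path
t = repo.tags()                 # dict mapping tag name to binary node; 'tip' is always present
node = repo.lookup('tip')       # tag lookup first, then changelog.lookup as a fallback
for name, n in repo.tagslist(): # same mapping, ordered by revision (oldest first)
    print name, short(n)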
304 304 def dev(self):
305 305 return os.lstat(self.path).st_dev
306 306
307 307 def local(self):
308 308 return True
309 309
310 310 def join(self, f):
311 311 return os.path.join(self.path, f)
312 312
313 313 def wjoin(self, f):
314 314 return os.path.join(self.root, f)
315 315
316 316 def file(self, f):
317 317 if f[0] == '/':
318 318 f = f[1:]
319 319 return filelog.filelog(self.opener, f, self.revlogversion)
320 320
321 321 def changectx(self, changeid=None):
322 322 return context.changectx(self, changeid)
323 323
324 324 def workingctx(self):
325 325 return context.workingctx(self)
326 326
327 327 def parents(self, changeid=None):
328 328 '''
329 329 get list of changectxs for parents of changeid or working directory
330 330 '''
331 331 if changeid is None:
332 332 pl = self.dirstate.parents()
333 333 else:
334 334 n = self.changelog.lookup(changeid)
335 335 pl = self.changelog.parents(n)
336 336 if pl[1] == nullid:
337 337 return [self.changectx(pl[0])]
338 338 return [self.changectx(pl[0]), self.changectx(pl[1])]
339 339
340 340 def filectx(self, path, changeid=None, fileid=None):
341 341 """changeid can be a changeset revision, node, or tag.
342 342 fileid can be a file revision or node."""
343 343 return context.filectx(self, path, changeid, fileid)
344 344
345 345 def getcwd(self):
346 346 return self.dirstate.getcwd()
347 347
348 348 def wfile(self, f, mode='r'):
349 349 return self.wopener(f, mode)
350 350
351 351 def wread(self, filename):
352 352 if self.encodepats == None:
353 353 l = []
354 354 for pat, cmd in self.ui.configitems("encode"):
355 355 mf = util.matcher(self.root, "", [pat], [], [])[1]
356 356 l.append((mf, cmd))
357 357 self.encodepats = l
358 358
359 359 data = self.wopener(filename, 'r').read()
360 360
361 361 for mf, cmd in self.encodepats:
362 362 if mf(filename):
363 363 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
364 364 data = util.filter(data, cmd)
365 365 break
366 366
367 367 return data
368 368
369 369 def wwrite(self, filename, data, fd=None):
370 370 if self.decodepats == None:
371 371 l = []
372 372 for pat, cmd in self.ui.configitems("decode"):
373 373 mf = util.matcher(self.root, "", [pat], [], [])[1]
374 374 l.append((mf, cmd))
375 375 self.decodepats = l
376 376
377 377 for mf, cmd in self.decodepats:
378 378 if mf(filename):
379 379 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
380 380 data = util.filter(data, cmd)
381 381 break
382 382
383 383 if fd:
384 384 return fd.write(data)
385 385 return self.wopener(filename, 'w').write(data)
386 386
387 387 def transaction(self):
388 388 tr = self.transhandle
389 389 if tr != None and tr.running():
390 390 return tr.nest()
391 391
392 392 # save dirstate for rollback
393 393 try:
394 394 ds = self.opener("dirstate").read()
395 395 except IOError:
396 396 ds = ""
397 397 self.opener("journal.dirstate", "w").write(ds)
398 398
399 399 tr = transaction.transaction(self.ui.warn, self.opener,
400 400 self.join("journal"),
401 401 aftertrans(self.path))
402 402 self.transhandle = tr
403 403 return tr
404 404
405 405 def recover(self):
406 406 l = self.lock()
407 407 if os.path.exists(self.join("journal")):
408 408 self.ui.status(_("rolling back interrupted transaction\n"))
409 409 transaction.rollback(self.opener, self.join("journal"))
410 410 self.reload()
411 411 return True
412 412 else:
413 413 self.ui.warn(_("no interrupted transaction available\n"))
414 414 return False
415 415
416 416 def rollback(self, wlock=None):
417 417 if not wlock:
418 418 wlock = self.wlock()
419 419 l = self.lock()
420 420 if os.path.exists(self.join("undo")):
421 421 self.ui.status(_("rolling back last transaction\n"))
422 422 transaction.rollback(self.opener, self.join("undo"))
423 423 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
424 424 self.reload()
425 425 self.wreload()
426 426 else:
427 427 self.ui.warn(_("no rollback information available\n"))
428 428
429 429 def wreload(self):
430 430 self.dirstate.read()
431 431
432 432 def reload(self):
433 433 self.changelog.load()
434 434 self.manifest.load()
435 435 self.tagscache = None
436 436 self.nodetagscache = None
437 437
438 438 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
439 439 desc=None):
440 440 try:
441 441 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
442 442 except lock.LockHeld, inst:
443 443 if not wait:
444 444 raise
445 445 self.ui.warn(_("waiting for lock on %s held by %s\n") %
446 446 (desc, inst.args[0]))
447 447 # default to 600 seconds timeout
448 448 l = lock.lock(self.join(lockname),
449 449 int(self.ui.config("ui", "timeout") or 600),
450 450 releasefn, desc=desc)
451 451 if acquirefn:
452 452 acquirefn()
453 453 return l
454 454
455 455 def lock(self, wait=1):
456 456 return self.do_lock("lock", wait, acquirefn=self.reload,
457 457 desc=_('repository %s') % self.origroot)
458 458
459 459 def wlock(self, wait=1):
460 460 return self.do_lock("wlock", wait, self.dirstate.write,
461 461 self.wreload,
462 462 desc=_('working directory of %s') % self.origroot)
463 463
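A hedged sketch of the locking pattern these helpers support (pull() further down releases its lock the same way); repo is assumed to be a localrepository opened as in the sketch above, and the 600-second default comes from do_lock.

# repo: a localrepository, opened as in the earlier sketch (hypothetical path)
wlock = repo.wlock()   # working-directory lock; its release function writes the dirstate
l = repo.lock()        # store lock; waits up to ui.timeout (default 600s) if already held
try:
    pass               # ... modify the store and/or working directory ...
finally:
    l.release()
    wlock.release()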
464 464 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
465 "determine whether a new filenode is needed"
465 """
466 Determine whether a new filenode is needed and what parent
467 and rename information is needed for a file commit.
468
469 Returns (old entry, file parent 1, file parent 2, metadata)
470
471 If old entry is not None, a commit is not needed.
472 """
466 473 fp1 = manifest1.get(filename, nullid)
467 474 fp2 = manifest2.get(filename, nullid)
468 475
469 if fp2 != nullid:
476 meta = {}
477 cp = self.dirstate.copied(filename)
478 if cp:
479 meta["copy"] = cp
480 if not manifest2: # not a branch merge
481 meta["copyrev"] = hex(manifest1.get(cp, nullid))
482 fp2 = nullid
483 elif fp2 != nullid: # copied on remote side
484 meta["copyrev"] = hex(manifest1.get(cp, nullid))
485 else: # copied on local side, reversed
486 meta["copyrev"] = hex(manifest2.get(cp))
487 fp2 = nullid
488 self.ui.debug(_(" %s: copy %s:%s\n") %
489 (filename, cp, meta["copyrev"]))
490 fp1 = nullid
491 elif fp2 != nullid:
470 492 # is one parent an ancestor of the other?
471 493 fpa = filelog.ancestor(fp1, fp2)
472 494 if fpa == fp1:
473 495 fp1, fp2 = fp2, nullid
474 496 elif fpa == fp2:
475 497 fp2 = nullid
476 498
477 499 # is the file unmodified from the parent? report existing entry
478 if fp2 == nullid and text == filelog.read(fp1):
479 return (fp1, None, None)
500 if fp2 == nullid and not filelog.cmp(fp1, text):
501 return (fp1, None, None, {})
480 502
481 return (None, fp1, fp2)
503 return (None, fp1, fp2, meta)
482 504
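To make the new return contract concrete, the sketch below shows roughly what the tuple looks like for a file the dirstate has recorded as a copy; the path and filenode hex are illustrative only. For a plain edit, meta stays empty, and when the text matches fp1 the method short-circuits to (fp1, None, None, {}).

from mercurial.node import nullid

entry = None                    # None: a new filenode still has to be committed
fp1 = nullid                    # the copy branch above always nulls the first file parent
fp2 = nullid                    # also null, unless the copy happened on the remote side of a merge
meta = {
    'copy': 'src/old_name.py',  # source path returned by dirstate.copied() (hypothetical)
    'copyrev': 'ab' * 20,       # hex filenode of that source in the parent manifest (hypothetical)
}
# commit() and rawcommit() below pass meta straight into the filelog's
# add(t, meta, tr, linkrev, fp1, fp2).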
483 505 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
484 506 orig_parent = self.dirstate.parents()[0] or nullid
485 507 p1 = p1 or self.dirstate.parents()[0] or nullid
486 508 p2 = p2 or self.dirstate.parents()[1] or nullid
487 509 c1 = self.changelog.read(p1)
488 510 c2 = self.changelog.read(p2)
489 511 m1 = self.manifest.read(c1[0]).copy()
490 512 m2 = self.manifest.read(c2[0])
491 513 changed = []
492 514
493 515 if orig_parent == p1:
494 516 update_dirstate = 1
495 517 else:
496 518 update_dirstate = 0
497 519
498 520 if not wlock:
499 521 wlock = self.wlock()
500 522 l = self.lock()
501 523 tr = self.transaction()
502 524 linkrev = self.changelog.count()
503 525 for f in files:
504 526 try:
505 527 t = self.wread(f)
506 528 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
507 529 r = self.file(f)
508 530
509 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
531 entry, fp1, fp2, meta = self.checkfilemerge(f, t, r, m1, m2)
510 532 if entry:
511 533 m1[f] = entry
512 534 continue
513 535
514 m1[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
536 m1[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
515 537 changed.append(f)
516 538 if update_dirstate:
517 539 self.dirstate.update([f], "n")
518 540 except IOError:
519 541 try:
520 542 del m1[f]
521 543 if update_dirstate:
522 544 self.dirstate.forget([f])
523 545 except:
524 546 # deleted from p2?
525 547 pass
526 548
527 549 mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
528 550 user = user or self.ui.username()
529 551 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
530 552 tr.close()
531 553 if update_dirstate:
532 554 self.dirstate.setparents(n, nullid)
533 555
534 556 def commit(self, files=None, text="", user=None, date=None,
535 557 match=util.always, force=False, lock=None, wlock=None,
536 558 force_editor=False):
537 559 commit = []
538 560 remove = []
539 561 changed = []
540 562
541 563 if files:
542 564 for f in files:
543 565 s = self.dirstate.state(f)
544 566 if s in 'nmai':
545 567 commit.append(f)
546 568 elif s == 'r':
547 569 remove.append(f)
548 570 else:
549 571 self.ui.warn(_("%s not tracked!\n") % f)
550 572 else:
551 573 modified, added, removed, deleted, unknown = self.status(match=match)[:5]
552 574 commit = modified + added
553 575 remove = removed
554 576
555 577 p1, p2 = self.dirstate.parents()
556 578 c1 = self.changelog.read(p1)
557 579 c2 = self.changelog.read(p2)
558 580 m1 = self.manifest.read(c1[0]).copy()
559 581 m2 = self.manifest.read(c2[0])
560 582
561 583 if not commit and not remove and not force and p2 == nullid:
562 584 self.ui.status(_("nothing changed\n"))
563 585 return None
564 586
565 587 xp1 = hex(p1)
566 588 if p2 == nullid: xp2 = ''
567 589 else: xp2 = hex(p2)
568 590
569 591 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
570 592
571 593 if not wlock:
572 594 wlock = self.wlock()
573 595 if not lock:
574 596 lock = self.lock()
575 597 tr = self.transaction()
576 598
577 599 # check in files
578 600 new = {}
579 601 linkrev = self.changelog.count()
580 602 commit.sort()
581 603 for f in commit:
582 604 self.ui.note(f + "\n")
583 605 try:
584 606 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
585 607 t = self.wread(f)
586 608 except IOError:
587 609 self.ui.warn(_("trouble committing %s!\n") % f)
588 610 raise
589 611
590 612 r = self.file(f)
591 613
592 meta = {}
593 cp = self.dirstate.copied(f)
594 if cp:
595 meta["copy"] = cp
596 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
597 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
598 fp1, fp2 = nullid, nullid
599 else:
600 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
601 if entry:
602 new[f] = entry
603 continue
614 entry, fp1, fp2, meta = self.checkfilemerge(f, t, r, m1, m2)
615 if entry:
616 new[f] = entry
617 continue
604 618
605 619 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
606 620 # remember what we've added so that we can later calculate
607 621 # the files to pull from a set of changesets
608 622 changed.append(f)
609 623
610 624 # update manifest
611 625 m1.update(new)
612 626 for f in remove:
613 627 if f in m1:
614 628 del m1[f]
615 629 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0],
616 630 (new, remove))
617 631
618 632 # add changeset
619 633 new = new.keys()
620 634 new.sort()
621 635
622 636 user = user or self.ui.username()
623 637 if not text or force_editor:
624 638 edittext = []
625 639 if text:
626 640 edittext.append(text)
627 641 edittext.append("")
628 642 if p2 != nullid:
629 643 edittext.append("HG: branch merge")
630 644 edittext.extend(["HG: changed %s" % f for f in changed])
631 645 edittext.extend(["HG: removed %s" % f for f in remove])
632 646 if not changed and not remove:
633 647 edittext.append("HG: no files changed")
634 648 edittext.append("")
635 649 # run editor in the repository root
636 650 olddir = os.getcwd()
637 651 os.chdir(self.root)
638 652 text = self.ui.edit("\n".join(edittext), user)
639 653 os.chdir(olddir)
640 654
641 655 lines = [line.rstrip() for line in text.rstrip().splitlines()]
642 656 while lines and not lines[0]:
643 657 del lines[0]
644 658 if not lines:
645 659 return None
646 660 text = '\n'.join(lines)
647 661 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
648 662 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
649 663 parent2=xp2)
650 664 tr.close()
651 665
652 666 self.dirstate.setparents(n)
653 667 self.dirstate.update(new, "n")
654 668 self.dirstate.forget(remove)
655 669
656 670 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
657 671 return n
658 672
659 673 def walk(self, node=None, files=[], match=util.always, badmatch=None):
660 674 if node:
661 675 fdict = dict.fromkeys(files)
662 676 for fn in self.manifest.read(self.changelog.read(node)[0]):
663 677 for ffn in fdict:
664 678 # match if the file is the exact name or a directory
665 679 if ffn == fn or fn.startswith("%s/" % ffn):
666 680 del fdict[ffn]
667 681 break
668 682 if match(fn):
669 683 yield 'm', fn
670 684 for fn in fdict:
671 685 if badmatch and badmatch(fn):
672 686 if match(fn):
673 687 yield 'b', fn
674 688 else:
675 689 self.ui.warn(_('%s: No such file in rev %s\n') % (
676 690 util.pathto(self.getcwd(), fn), short(node)))
677 691 else:
678 692 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
679 693 yield src, fn
680 694
681 695 def status(self, node1=None, node2=None, files=[], match=util.always,
682 696 wlock=None, list_ignored=False, list_clean=False):
683 697 """return status of files between two nodes or node and working directory
684 698
685 699 If node1 is None, use the first dirstate parent instead.
686 700 If node2 is None, compare node1 with working directory.
687 701 """
688 702
689 703 def fcmp(fn, mf):
690 704 t1 = self.wread(fn)
691 705 return self.file(fn).cmp(mf.get(fn, nullid), t1)
692 706
693 707 def mfmatches(node):
694 708 change = self.changelog.read(node)
695 709 mf = dict(self.manifest.read(change[0]))
696 710 for fn in mf.keys():
697 711 if not match(fn):
698 712 del mf[fn]
699 713 return mf
700 714
701 715 modified, added, removed, deleted, unknown = [], [], [], [], []
702 716 ignored, clean = [], []
703 717
704 718 compareworking = False
705 719 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
706 720 compareworking = True
707 721
708 722 if not compareworking:
709 723 # read the manifest from node1 before the manifest from node2,
710 724 # so that we'll hit the manifest cache if we're going through
711 725 # all the revisions in parent->child order.
712 726 mf1 = mfmatches(node1)
713 727
714 728 # are we comparing the working directory?
715 729 if not node2:
716 730 if not wlock:
717 731 try:
718 732 wlock = self.wlock(wait=0)
719 733 except lock.LockException:
720 734 wlock = None
721 735 (lookup, modified, added, removed, deleted, unknown,
722 736 ignored, clean) = self.dirstate.status(files, match,
723 737 list_ignored, list_clean)
724 738
725 739 # are we comparing working dir against its parent?
726 740 if compareworking:
727 741 if lookup:
728 742 # do a full compare of any files that might have changed
729 743 mf2 = mfmatches(self.dirstate.parents()[0])
730 744 for f in lookup:
731 745 if fcmp(f, mf2):
732 746 modified.append(f)
733 747 else:
734 748 clean.append(f)
735 749 if wlock is not None:
736 750 self.dirstate.update([f], "n")
737 751 else:
738 752 # we are comparing working dir against non-parent
739 753 # generate a pseudo-manifest for the working dir
740 754 mf2 = mfmatches(self.dirstate.parents()[0])
741 755 for f in lookup + modified + added:
742 756 mf2[f] = ""
743 757 for f in removed:
744 758 if f in mf2:
745 759 del mf2[f]
746 760 else:
747 761 # we are comparing two revisions
748 762 mf2 = mfmatches(node2)
749 763
750 764 if not compareworking:
751 765 # flush lists from dirstate before comparing manifests
752 766 modified, added, clean = [], [], []
753 767
754 768 # make sure to sort the files so we talk to the disk in a
755 769 # reasonable order
756 770 mf2keys = mf2.keys()
757 771 mf2keys.sort()
758 772 for fn in mf2keys:
759 773 if mf1.has_key(fn):
760 774 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
761 775 modified.append(fn)
762 776 elif list_clean:
763 777 clean.append(fn)
764 778 del mf1[fn]
765 779 else:
766 780 added.append(fn)
767 781
768 782 removed = mf1.keys()
769 783
770 784 # sort and return results:
771 785 for l in modified, added, removed, deleted, unknown, ignored, clean:
772 786 l.sort()
773 787 return (modified, added, removed, deleted, unknown, ignored, clean)
774 788
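A hedged sketch of consuming the seven-element tuple returned above; repo is again assumed to be an open localrepository, and the letters in the loop follow the usual hg status codes.

# repo: a localrepository, opened as in the earlier sketch (hypothetical path)
modified, added, removed, deleted, unknown, ignored, clean = \
    repo.status(list_ignored=True, list_clean=True)
for letter, names in zip('MAR!?IC', (modified, added, removed,
                                     deleted, unknown, ignored, clean)):
    for f in names:
        print letter, f          # e.g. 'M some/file.py'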
775 789 def add(self, list, wlock=None):
776 790 if not wlock:
777 791 wlock = self.wlock()
778 792 for f in list:
779 793 p = self.wjoin(f)
780 794 if not os.path.exists(p):
781 795 self.ui.warn(_("%s does not exist!\n") % f)
782 796 elif not os.path.isfile(p):
783 797 self.ui.warn(_("%s not added: only files supported currently\n")
784 798 % f)
785 799 elif self.dirstate.state(f) in 'an':
786 800 self.ui.warn(_("%s already tracked!\n") % f)
787 801 else:
788 802 self.dirstate.update([f], "a")
789 803
790 804 def forget(self, list, wlock=None):
791 805 if not wlock:
792 806 wlock = self.wlock()
793 807 for f in list:
794 808 if self.dirstate.state(f) not in 'ai':
795 809 self.ui.warn(_("%s not added!\n") % f)
796 810 else:
797 811 self.dirstate.forget([f])
798 812
799 813 def remove(self, list, unlink=False, wlock=None):
800 814 if unlink:
801 815 for f in list:
802 816 try:
803 817 util.unlink(self.wjoin(f))
804 818 except OSError, inst:
805 819 if inst.errno != errno.ENOENT:
806 820 raise
807 821 if not wlock:
808 822 wlock = self.wlock()
809 823 for f in list:
810 824 p = self.wjoin(f)
811 825 if os.path.exists(p):
812 826 self.ui.warn(_("%s still exists!\n") % f)
813 827 elif self.dirstate.state(f) == 'a':
814 828 self.dirstate.forget([f])
815 829 elif f not in self.dirstate:
816 830 self.ui.warn(_("%s not tracked!\n") % f)
817 831 else:
818 832 self.dirstate.update([f], "r")
819 833
820 834 def undelete(self, list, wlock=None):
821 835 p = self.dirstate.parents()[0]
822 836 mn = self.changelog.read(p)[0]
823 837 m = self.manifest.read(mn)
824 838 if not wlock:
825 839 wlock = self.wlock()
826 840 for f in list:
827 841 if self.dirstate.state(f) not in "r":
828 842 self.ui.warn("%s not removed!\n" % f)
829 843 else:
830 844 t = self.file(f).read(m[f])
831 845 self.wwrite(f, t)
832 846 util.set_exec(self.wjoin(f), m.execf(f))
833 847 self.dirstate.update([f], "n")
834 848
835 849 def copy(self, source, dest, wlock=None):
836 850 p = self.wjoin(dest)
837 851 if not os.path.exists(p):
838 852 self.ui.warn(_("%s does not exist!\n") % dest)
839 853 elif not os.path.isfile(p):
840 854 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
841 855 else:
842 856 if not wlock:
843 857 wlock = self.wlock()
844 858 if self.dirstate.state(dest) == '?':
845 859 self.dirstate.update([dest], "a")
846 860 self.dirstate.copy(source, dest)
847 861
848 862 def heads(self, start=None):
849 863 heads = self.changelog.heads(start)
850 864 # sort the output in rev descending order
851 865 heads = [(-self.changelog.rev(h), h) for h in heads]
852 866 heads.sort()
853 867 return [n for (r, n) in heads]
854 868
855 869 # branchlookup returns a dict giving a list of branches for
856 870 # each head. A branch is defined as the tag of a node or
857 871 # the branch of the node's parents. If a node has multiple
858 872 # branch tags, tags are eliminated if they are visible from other
859 873 # branch tags.
860 874 #
861 875 # So, for this graph: a->b->c->d->e
862 876 # \ /
863 877 # aa -----/
864 878 # a has tag 2.6.12
865 879 # d has tag 2.6.13
866 880 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
867 881 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
868 882 # from the list.
869 883 #
870 884 # It is possible that more than one head will have the same branch tag.
871 885 # callers need to check the result for multiple heads under the same
872 886 # branch tag if that is a problem for them (ie checkout of a specific
873 887 # branch).
874 888 #
875 889 # passing in a specific branch will limit the depth of the search
876 890 # through the parents. It won't limit the branches returned in the
877 891 # result though.
878 892 def branchlookup(self, heads=None, branch=None):
879 893 if not heads:
880 894 heads = self.heads()
881 895 headt = [ h for h in heads ]
882 896 chlog = self.changelog
883 897 branches = {}
884 898 merges = []
885 899 seenmerge = {}
886 900
887 901 # traverse the tree once for each head, recording in the branches
888 902 # dict which tags are visible from this head. The branches
889 903 # dict also records which tags are visible from each tag
890 904 # while we traverse.
891 905 while headt or merges:
892 906 if merges:
893 907 n, found = merges.pop()
894 908 visit = [n]
895 909 else:
896 910 h = headt.pop()
897 911 visit = [h]
898 912 found = [h]
899 913 seen = {}
900 914 while visit:
901 915 n = visit.pop()
902 916 if n in seen:
903 917 continue
904 918 pp = chlog.parents(n)
905 919 tags = self.nodetags(n)
906 920 if tags:
907 921 for x in tags:
908 922 if x == 'tip':
909 923 continue
910 924 for f in found:
911 925 branches.setdefault(f, {})[n] = 1
912 926 branches.setdefault(n, {})[n] = 1
913 927 break
914 928 if n not in found:
915 929 found.append(n)
916 930 if branch in tags:
917 931 continue
918 932 seen[n] = 1
919 933 if pp[1] != nullid and n not in seenmerge:
920 934 merges.append((pp[1], [x for x in found]))
921 935 seenmerge[n] = 1
922 936 if pp[0] != nullid:
923 937 visit.append(pp[0])
924 938 # traverse the branches dict, eliminating branch tags from each
925 939 # head that are visible from another branch tag for that head.
926 940 out = {}
927 941 viscache = {}
928 942 for h in heads:
929 943 def visible(node):
930 944 if node in viscache:
931 945 return viscache[node]
932 946 ret = {}
933 947 visit = [node]
934 948 while visit:
935 949 x = visit.pop()
936 950 if x in viscache:
937 951 ret.update(viscache[x])
938 952 elif x not in ret:
939 953 ret[x] = 1
940 954 if x in branches:
941 955 visit[len(visit):] = branches[x].keys()
942 956 viscache[node] = ret
943 957 return ret
944 958 if h not in branches:
945 959 continue
946 960 # O(n^2), but somewhat limited. This only searches the
947 961 # tags visible from a specific head, not all the tags in the
948 962 # whole repo.
949 963 for b in branches[h]:
950 964 vis = False
951 965 for bb in branches[h].keys():
952 966 if b != bb:
953 967 if b in visible(bb):
954 968 vis = True
955 969 break
956 970 if not vis:
957 971 l = out.setdefault(h, [])
958 972 l[len(l):] = self.nodetags(b)
959 973 return out
960 974
961 975 def branches(self, nodes):
962 976 if not nodes:
963 977 nodes = [self.changelog.tip()]
964 978 b = []
965 979 for n in nodes:
966 980 t = n
967 981 while 1:
968 982 p = self.changelog.parents(n)
969 983 if p[1] != nullid or p[0] == nullid:
970 984 b.append((t, n, p[0], p[1]))
971 985 break
972 986 n = p[0]
973 987 return b
974 988
975 989 def between(self, pairs):
976 990 r = []
977 991
978 992 for top, bottom in pairs:
979 993 n, l, i = top, [], 0
980 994 f = 1
981 995
982 996 while n != bottom:
983 997 p = self.changelog.parents(n)[0]
984 998 if i == f:
985 999 l.append(n)
986 1000 f = f * 2
987 1001 n = p
988 1002 i += 1
989 1003
990 1004 r.append(l)
991 1005
992 1006 return r
993 1007
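between() samples each requested top..bottom run at exponentially growing distances below the top (1, 2, 4, 8, ...); findincoming's binary search below narrows against exactly this sampling. A self-contained sketch of the stepping over a toy linear history (all names hypothetical):

def sample_between(parents, top, bottom):
    """Mirror between()'s inner loop: collect the nodes 1, 2, 4, ... steps below top."""
    n, out, i, f = top, [], 0, 1
    while n != bottom:
        if i == f:
            out.append(n)
            f *= 2
        n = parents[n]           # follow the first parent, as the real loop does
        i += 1
    return out

# toy history: 0 <- 1 <- 2 <- ... <- 10, each revision's parent is the one below it
parents = dict((r, r - 1) for r in range(1, 11))
print sample_between(parents, 10, 0)     # [9, 8, 6, 2]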
994 1008 def findincoming(self, remote, base=None, heads=None, force=False):
995 1009 """Return list of roots of the subsets of missing nodes from remote
996 1010
997 1011 If base dict is specified, assume that these nodes and their parents
998 1012 exist on the remote side and that no child of a node of base exists
999 1013 in both remote and self.
1000 1014 Furthermore base will be updated to include the nodes that exists
1001 1015 in self and remote but no children exists in self and remote.
1002 1016 If a list of heads is specified, return only nodes which are heads
1003 1017 or ancestors of these heads.
1004 1018
1005 1019 All the ancestors of base are in self and in remote.
1006 1020 All the descendants of the list returned are missing in self.
1007 1021 (and so we know that the rest of the nodes are missing in remote, see
1008 1022 outgoing)
1009 1023 """
1010 1024 m = self.changelog.nodemap
1011 1025 search = []
1012 1026 fetch = {}
1013 1027 seen = {}
1014 1028 seenbranch = {}
1015 1029 if base == None:
1016 1030 base = {}
1017 1031
1018 1032 if not heads:
1019 1033 heads = remote.heads()
1020 1034
1021 1035 if self.changelog.tip() == nullid:
1022 1036 base[nullid] = 1
1023 1037 if heads != [nullid]:
1024 1038 return [nullid]
1025 1039 return []
1026 1040
1027 1041 # assume we're closer to the tip than the root
1028 1042 # and start by examining the heads
1029 1043 self.ui.status(_("searching for changes\n"))
1030 1044
1031 1045 unknown = []
1032 1046 for h in heads:
1033 1047 if h not in m:
1034 1048 unknown.append(h)
1035 1049 else:
1036 1050 base[h] = 1
1037 1051
1038 1052 if not unknown:
1039 1053 return []
1040 1054
1041 1055 req = dict.fromkeys(unknown)
1042 1056 reqcnt = 0
1043 1057
1044 1058 # search through remote branches
1045 1059 # a 'branch' here is a linear segment of history, with four parts:
1046 1060 # head, root, first parent, second parent
1047 1061 # (a branch always has two parents (or none) by definition)
1048 1062 unknown = remote.branches(unknown)
1049 1063 while unknown:
1050 1064 r = []
1051 1065 while unknown:
1052 1066 n = unknown.pop(0)
1053 1067 if n[0] in seen:
1054 1068 continue
1055 1069
1056 1070 self.ui.debug(_("examining %s:%s\n")
1057 1071 % (short(n[0]), short(n[1])))
1058 1072 if n[0] == nullid: # found the end of the branch
1059 1073 pass
1060 1074 elif n in seenbranch:
1061 1075 self.ui.debug(_("branch already found\n"))
1062 1076 continue
1063 1077 elif n[1] and n[1] in m: # do we know the base?
1064 1078 self.ui.debug(_("found incomplete branch %s:%s\n")
1065 1079 % (short(n[0]), short(n[1])))
1066 1080 search.append(n) # schedule branch range for scanning
1067 1081 seenbranch[n] = 1
1068 1082 else:
1069 1083 if n[1] not in seen and n[1] not in fetch:
1070 1084 if n[2] in m and n[3] in m:
1071 1085 self.ui.debug(_("found new changeset %s\n") %
1072 1086 short(n[1]))
1073 1087 fetch[n[1]] = 1 # earliest unknown
1074 1088 for p in n[2:4]:
1075 1089 if p in m:
1076 1090 base[p] = 1 # latest known
1077 1091
1078 1092 for p in n[2:4]:
1079 1093 if p not in req and p not in m:
1080 1094 r.append(p)
1081 1095 req[p] = 1
1082 1096 seen[n[0]] = 1
1083 1097
1084 1098 if r:
1085 1099 reqcnt += 1
1086 1100 self.ui.debug(_("request %d: %s\n") %
1087 1101 (reqcnt, " ".join(map(short, r))))
1088 1102 for p in range(0, len(r), 10):
1089 1103 for b in remote.branches(r[p:p+10]):
1090 1104 self.ui.debug(_("received %s:%s\n") %
1091 1105 (short(b[0]), short(b[1])))
1092 1106 unknown.append(b)
1093 1107
1094 1108 # do binary search on the branches we found
1095 1109 while search:
1096 1110 n = search.pop(0)
1097 1111 reqcnt += 1
1098 1112 l = remote.between([(n[0], n[1])])[0]
1099 1113 l.append(n[1])
1100 1114 p = n[0]
1101 1115 f = 1
1102 1116 for i in l:
1103 1117 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1104 1118 if i in m:
1105 1119 if f <= 2:
1106 1120 self.ui.debug(_("found new branch changeset %s\n") %
1107 1121 short(p))
1108 1122 fetch[p] = 1
1109 1123 base[i] = 1
1110 1124 else:
1111 1125 self.ui.debug(_("narrowed branch search to %s:%s\n")
1112 1126 % (short(p), short(i)))
1113 1127 search.append((p, i))
1114 1128 break
1115 1129 p, f = i, f * 2
1116 1130
1117 1131 # sanity check our fetch list
1118 1132 for f in fetch.keys():
1119 1133 if f in m:
1120 1134 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1121 1135
1122 1136 if base.keys() == [nullid]:
1123 1137 if force:
1124 1138 self.ui.warn(_("warning: repository is unrelated\n"))
1125 1139 else:
1126 1140 raise util.Abort(_("repository is unrelated"))
1127 1141
1128 1142 self.ui.debug(_("found new changesets starting at ") +
1129 1143 " ".join([short(f) for f in fetch]) + "\n")
1130 1144
1131 1145 self.ui.debug(_("%d total queries\n") % reqcnt)
1132 1146
1133 1147 return fetch.keys()
1134 1148
1135 1149 def findoutgoing(self, remote, base=None, heads=None, force=False):
1136 1150 """Return list of nodes that are roots of subsets not in remote
1137 1151
1138 1152 If base dict is specified, assume that these nodes and their parents
1139 1153 exist on the remote side.
1140 1154 If a list of heads is specified, return only nodes which are heads
1141 1155 or ancestors of these heads, and return a second element which
1142 1156 contains all remote heads which get new children.
1143 1157 """
1144 1158 if base == None:
1145 1159 base = {}
1146 1160 self.findincoming(remote, base, heads, force=force)
1147 1161
1148 1162 self.ui.debug(_("common changesets up to ")
1149 1163 + " ".join(map(short, base.keys())) + "\n")
1150 1164
1151 1165 remain = dict.fromkeys(self.changelog.nodemap)
1152 1166
1153 1167 # prune everything remote has from the tree
1154 1168 del remain[nullid]
1155 1169 remove = base.keys()
1156 1170 while remove:
1157 1171 n = remove.pop(0)
1158 1172 if n in remain:
1159 1173 del remain[n]
1160 1174 for p in self.changelog.parents(n):
1161 1175 remove.append(p)
1162 1176
1163 1177 # find every node whose parents have been pruned
1164 1178 subset = []
1165 1179 # find every remote head that will get new children
1166 1180 updated_heads = {}
1167 1181 for n in remain:
1168 1182 p1, p2 = self.changelog.parents(n)
1169 1183 if p1 not in remain and p2 not in remain:
1170 1184 subset.append(n)
1171 1185 if heads:
1172 1186 if p1 in heads:
1173 1187 updated_heads[p1] = True
1174 1188 if p2 in heads:
1175 1189 updated_heads[p2] = True
1176 1190
1177 1191 # this is the set of all roots we have to push
1178 1192 if heads:
1179 1193 return subset, updated_heads.keys()
1180 1194 else:
1181 1195 return subset
1182 1196
1183 1197 def pull(self, remote, heads=None, force=False, lock=None):
1184 1198 mylock = False
1185 1199 if not lock:
1186 1200 lock = self.lock()
1187 1201 mylock = True
1188 1202
1189 1203 try:
1190 1204 fetch = self.findincoming(remote, force=force)
1191 1205 if fetch == [nullid]:
1192 1206 self.ui.status(_("requesting all changes\n"))
1193 1207
1194 1208 if not fetch:
1195 1209 self.ui.status(_("no changes found\n"))
1196 1210 return 0
1197 1211
1198 1212 if heads is None:
1199 1213 cg = remote.changegroup(fetch, 'pull')
1200 1214 else:
1201 1215 cg = remote.changegroupsubset(fetch, heads, 'pull')
1202 1216 return self.addchangegroup(cg, 'pull', remote.url())
1203 1217 finally:
1204 1218 if mylock:
1205 1219 lock.release()
1206 1220
1207 1221 def push(self, remote, force=False, revs=None):
1208 1222 # there are two ways to push to remote repo:
1209 1223 #
1210 1224 # addchangegroup assumes local user can lock remote
1211 1225 # repo (local filesystem, old ssh servers).
1212 1226 #
1213 1227 # unbundle assumes local user cannot lock remote repo (new ssh
1214 1228 # servers, http servers).
1215 1229
1216 1230 if remote.capable('unbundle'):
1217 1231 return self.push_unbundle(remote, force, revs)
1218 1232 return self.push_addchangegroup(remote, force, revs)
1219 1233
1220 1234 def prepush(self, remote, force, revs):
1221 1235 base = {}
1222 1236 remote_heads = remote.heads()
1223 1237 inc = self.findincoming(remote, base, remote_heads, force=force)
1224 1238 if not force and inc:
1225 1239 self.ui.warn(_("abort: unsynced remote changes!\n"))
1226 1240 self.ui.status(_("(did you forget to sync?"
1227 1241 " use push -f to force)\n"))
1228 1242 return None, 1
1229 1243
1230 1244 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1231 1245 if revs is not None:
1232 1246 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1233 1247 else:
1234 1248 bases, heads = update, self.changelog.heads()
1235 1249
1236 1250 if not bases:
1237 1251 self.ui.status(_("no changes found\n"))
1238 1252 return None, 1
1239 1253 elif not force:
1240 1254 # FIXME we don't properly detect creation of new heads
1241 1255 # in the push -r case, assume the user knows what he's doing
1242 1256 if not revs and len(remote_heads) < len(heads) \
1243 1257 and remote_heads != [nullid]:
1244 1258 self.ui.warn(_("abort: push creates new remote branches!\n"))
1245 1259 self.ui.status(_("(did you forget to merge?"
1246 1260 " use push -f to force)\n"))
1247 1261 return None, 1
1248 1262
1249 1263 if revs is None:
1250 1264 cg = self.changegroup(update, 'push')
1251 1265 else:
1252 1266 cg = self.changegroupsubset(update, revs, 'push')
1253 1267 return cg, remote_heads
1254 1268
1255 1269 def push_addchangegroup(self, remote, force, revs):
1256 1270 lock = remote.lock()
1257 1271
1258 1272 ret = self.prepush(remote, force, revs)
1259 1273 if ret[0] is not None:
1260 1274 cg, remote_heads = ret
1261 1275 return remote.addchangegroup(cg, 'push', self.url())
1262 1276 return ret[1]
1263 1277
1264 1278 def push_unbundle(self, remote, force, revs):
1265 1279 # local repo finds heads on server, finds out what revs it
1266 1280 # must push. once revs transferred, if server finds it has
1267 1281 # different heads (someone else won commit/push race), server
1268 1282 # aborts.
1269 1283
1270 1284 ret = self.prepush(remote, force, revs)
1271 1285 if ret[0] is not None:
1272 1286 cg, remote_heads = ret
1273 1287 if force: remote_heads = ['force']
1274 1288 return remote.unbundle(cg, remote_heads, 'push')
1275 1289 return ret[1]
1276 1290
1277 1291 def changegroupsubset(self, bases, heads, source):
1278 1292 """This function generates a changegroup consisting of all the nodes
1279 1293 that are descendents of any of the bases, and ancestors of any of
1280 1294 the heads.
1281 1295
1282 1296 It is fairly complex as determining which filenodes and which
1283 1297 manifest nodes need to be included for the changeset to be complete
1284 1298 is non-trivial.
1285 1299
1286 1300 Another wrinkle is doing the reverse, figuring out which changeset in
1287 1301 the changegroup a particular filenode or manifestnode belongs to."""
1288 1302
1289 1303 self.hook('preoutgoing', throw=True, source=source)
1290 1304
1291 1305 # Set up some initial variables
1292 1306 # Make it easy to refer to self.changelog
1293 1307 cl = self.changelog
1294 1308 # msng is short for missing - compute the list of changesets in this
1295 1309 # changegroup.
1296 1310 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1297 1311 # Some bases may turn out to be superfluous, and some heads may be
1298 1312 # too. nodesbetween will return the minimal set of bases and heads
1299 1313 # necessary to re-create the changegroup.
1300 1314
1301 1315 # Known heads are the list of heads that it is assumed the recipient
1302 1316 # of this changegroup will know about.
1303 1317 knownheads = {}
1304 1318 # We assume that all parents of bases are known heads.
1305 1319 for n in bases:
1306 1320 for p in cl.parents(n):
1307 1321 if p != nullid:
1308 1322 knownheads[p] = 1
1309 1323 knownheads = knownheads.keys()
1310 1324 if knownheads:
1311 1325 # Now that we know what heads are known, we can compute which
1312 1326 # changesets are known. The recipient must know about all
1313 1327 # changesets required to reach the known heads from the null
1314 1328 # changeset.
1315 1329 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1316 1330 junk = None
1317 1331 # Transform the list into an ersatz set.
1318 1332 has_cl_set = dict.fromkeys(has_cl_set)
1319 1333 else:
1320 1334 # If there were no known heads, the recipient cannot be assumed to
1321 1335 # know about any changesets.
1322 1336 has_cl_set = {}
1323 1337
1324 1338 # Make it easy to refer to self.manifest
1325 1339 mnfst = self.manifest
1326 1340 # We don't know which manifests are missing yet
1327 1341 msng_mnfst_set = {}
1328 1342 # Nor do we know which filenodes are missing.
1329 1343 msng_filenode_set = {}
1330 1344
1331 1345 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1332 1346 junk = None
1333 1347
1334 1348 # A changeset always belongs to itself, so the changenode lookup
1335 1349 # function for a changenode is identity.
1336 1350 def identity(x):
1337 1351 return x
1338 1352
1339 1353 # A function generating function. Sets up an environment for the
1340 1354 # inner function.
1341 1355 def cmp_by_rev_func(revlog):
1342 1356 # Compare two nodes by their revision number in the environment's
1343 1357 # revision history. Since the revision number both represents the
1344 1358 # most efficient order to read the nodes in, and represents a
1345 1359 # topological sorting of the nodes, this function is often useful.
1346 1360 def cmp_by_rev(a, b):
1347 1361 return cmp(revlog.rev(a), revlog.rev(b))
1348 1362 return cmp_by_rev
1349 1363
1350 1364 # If we determine that a particular file or manifest node must be a
1351 1365 # node that the recipient of the changegroup will already have, we can
1352 1366 # also assume the recipient will have all the parents. This function
1353 1367 # prunes them from the set of missing nodes.
1354 1368 def prune_parents(revlog, hasset, msngset):
1355 1369 haslst = hasset.keys()
1356 1370 haslst.sort(cmp_by_rev_func(revlog))
1357 1371 for node in haslst:
1358 1372 parentlst = [p for p in revlog.parents(node) if p != nullid]
1359 1373 while parentlst:
1360 1374 n = parentlst.pop()
1361 1375 if n not in hasset:
1362 1376 hasset[n] = 1
1363 1377 p = [p for p in revlog.parents(n) if p != nullid]
1364 1378 parentlst.extend(p)
1365 1379 for n in hasset:
1366 1380 msngset.pop(n, None)
1367 1381
1368 1382 # This is a function generating function used to set up an environment
1369 1383 # for the inner function to execute in.
1370 1384 def manifest_and_file_collector(changedfileset):
1371 1385 # This is an information gathering function that gathers
1372 1386 # information from each changeset node that goes out as part of
1373 1387 # the changegroup. The information gathered is a list of which
1374 1388 # manifest nodes are potentially required (the recipient may
1375 1389 # already have them) and total list of all files which were
1376 1390 # changed in any changeset in the changegroup.
1377 1391 #
1378 1392 # We also remember the first changenode we saw any manifest
1379 1393 # referenced by so we can later determine which changenode 'owns'
1380 1394 # the manifest.
1381 1395 def collect_manifests_and_files(clnode):
1382 1396 c = cl.read(clnode)
1383 1397 for f in c[3]:
1384 1398 # This is to make sure we only have one instance of each
1385 1399 # filename string for each filename.
1386 1400 changedfileset.setdefault(f, f)
1387 1401 msng_mnfst_set.setdefault(c[0], clnode)
1388 1402 return collect_manifests_and_files
1389 1403
1390 1404 # Figure out which manifest nodes (of the ones we think might be part
1391 1405 # of the changegroup) the recipient must know about and remove them
1392 1406 # from the changegroup.
1393 1407 def prune_manifests():
1394 1408 has_mnfst_set = {}
1395 1409 for n in msng_mnfst_set:
1396 1410 # If a 'missing' manifest thinks it belongs to a changenode
1397 1411 # the recipient is assumed to have, obviously the recipient
1398 1412 # must have that manifest.
1399 1413 linknode = cl.node(mnfst.linkrev(n))
1400 1414 if linknode in has_cl_set:
1401 1415 has_mnfst_set[n] = 1
1402 1416 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1403 1417
1404 1418 # Use the information collected in collect_manifests_and_files to say
1405 1419 # which changenode any manifestnode belongs to.
1406 1420 def lookup_manifest_link(mnfstnode):
1407 1421 return msng_mnfst_set[mnfstnode]
1408 1422
1409 1423 # A function generating function that sets up the initial environment
1410 1424 # for the inner function.
1411 1425 def filenode_collector(changedfiles):
1412 1426 next_rev = [0]
1413 1427 # This gathers information from each manifestnode included in the
1414 1428 # changegroup about which filenodes the manifest node references
1415 1429 # so we can include those in the changegroup too.
1416 1430 #
1417 1431 # It also remembers which changenode each filenode belongs to. It
1418 1432 # does this by assuming that a filenode belongs to the changenode
1419 1433 # that the first manifest referencing it belongs to.
1420 1434 def collect_msng_filenodes(mnfstnode):
1421 1435 r = mnfst.rev(mnfstnode)
1422 1436 if r == next_rev[0]:
1423 1437 # If the last rev we looked at was the one just previous,
1424 1438 # we only need to see a diff.
1425 1439 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1426 1440 # For each line in the delta
1427 1441 for dline in delta.splitlines():
1428 1442 # get the filename and filenode for that line
1429 1443 f, fnode = dline.split('\0')
1430 1444 fnode = bin(fnode[:40])
1431 1445 f = changedfiles.get(f, None)
1432 1446 # And if the file is in the list of files we care
1433 1447 # about.
1434 1448 if f is not None:
1435 1449 # Get the changenode this manifest belongs to
1436 1450 clnode = msng_mnfst_set[mnfstnode]
1437 1451 # Create the set of filenodes for the file if
1438 1452 # there isn't one already.
1439 1453 ndset = msng_filenode_set.setdefault(f, {})
1440 1454 # And set the filenode's changelog node to the
1441 1455 # manifest's if it hasn't been set already.
1442 1456 ndset.setdefault(fnode, clnode)
1443 1457 else:
1444 1458 # Otherwise we need a full manifest.
1445 1459 m = mnfst.read(mnfstnode)
1446 1460 # For every file we care about.
1447 1461 for f in changedfiles:
1448 1462 fnode = m.get(f, None)
1449 1463 # If it's in the manifest
1450 1464 if fnode is not None:
1451 1465 # See comments above.
1452 1466 clnode = msng_mnfst_set[mnfstnode]
1453 1467 ndset = msng_filenode_set.setdefault(f, {})
1454 1468 ndset.setdefault(fnode, clnode)
1455 1469 # Remember the revision we hope to see next.
1456 1470 next_rev[0] = r + 1
1457 1471 return collect_msng_filenodes
1458 1472
1459 1473 # We have a list of filenodes we think we need for a file; let's remove
1460 1474 # all those we know the recipient must have.
1461 1475 def prune_filenodes(f, filerevlog):
1462 1476 msngset = msng_filenode_set[f]
1463 1477 hasset = {}
1464 1478 # If a 'missing' filenode thinks it belongs to a changenode we
1465 1479 # assume the recipient must have, then the recipient must have
1466 1480 # that filenode.
1467 1481 for n in msngset:
1468 1482 clnode = cl.node(filerevlog.linkrev(n))
1469 1483 if clnode in has_cl_set:
1470 1484 hasset[n] = 1
1471 1485 prune_parents(filerevlog, hasset, msngset)
1472 1486
1473 1487 # A function generator function that sets up a context for the
1474 1488 # inner function.
1475 1489 def lookup_filenode_link_func(fname):
1476 1490 msngset = msng_filenode_set[fname]
1477 1491 # Lookup the changenode the filenode belongs to.
1478 1492 def lookup_filenode_link(fnode):
1479 1493 return msngset[fnode]
1480 1494 return lookup_filenode_link
1481 1495
1482 1496 # Now that we have all these utility functions to help out and
1483 1497 # logically divide up the task, generate the group.
1484 1498 def gengroup():
1485 1499 # The set of changed files starts empty.
1486 1500 changedfiles = {}
1487 1501 # Create a changenode group generator that will call our functions
1488 1502 # back to lookup the owning changenode and collect information.
1489 1503 group = cl.group(msng_cl_lst, identity,
1490 1504 manifest_and_file_collector(changedfiles))
1491 1505 for chnk in group:
1492 1506 yield chnk
1493 1507
1494 1508 # The list of manifests has been collected by the generator
1495 1509 # calling our functions back.
1496 1510 prune_manifests()
1497 1511 msng_mnfst_lst = msng_mnfst_set.keys()
1498 1512 # Sort the manifestnodes by revision number.
1499 1513 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1500 1514 # Create a generator for the manifestnodes that calls our lookup
1501 1515 # and data collection functions back.
1502 1516 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1503 1517 filenode_collector(changedfiles))
1504 1518 for chnk in group:
1505 1519 yield chnk
1506 1520
1507 1521 # These are no longer needed, dereference and toss the memory for
1508 1522 # them.
1509 1523 msng_mnfst_lst = None
1510 1524 msng_mnfst_set.clear()
1511 1525
1512 1526 changedfiles = changedfiles.keys()
1513 1527 changedfiles.sort()
1514 1528 # Go through all our files in order sorted by name.
1515 1529 for fname in changedfiles:
1516 1530 filerevlog = self.file(fname)
1517 1531 # Toss out the filenodes that the recipient isn't really
1518 1532 # missing.
1519 1533 if msng_filenode_set.has_key(fname):
1520 1534 prune_filenodes(fname, filerevlog)
1521 1535 msng_filenode_lst = msng_filenode_set[fname].keys()
1522 1536 else:
1523 1537 msng_filenode_lst = []
1524 1538 # If any filenodes are left, generate the group for them,
1525 1539 # otherwise don't bother.
1526 1540 if len(msng_filenode_lst) > 0:
1527 1541 yield changegroup.genchunk(fname)
1528 1542 # Sort the filenodes by their revision #
1529 1543 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1530 1544 # Create a group generator and only pass in a changenode
1531 1545 # lookup function as we need to collect no information
1532 1546 # from filenodes.
1533 1547 group = filerevlog.group(msng_filenode_lst,
1534 1548 lookup_filenode_link_func(fname))
1535 1549 for chnk in group:
1536 1550 yield chnk
1537 1551 if msng_filenode_set.has_key(fname):
1538 1552 # Don't need this anymore, toss it to free memory.
1539 1553 del msng_filenode_set[fname]
1540 1554 # Signal that no more groups are left.
1541 1555 yield changegroup.closechunk()
1542 1556
1543 1557 if msng_cl_lst:
1544 1558 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1545 1559
1546 1560 return util.chunkbuffer(gengroup())
1547 1561
1548 1562 def changegroup(self, basenodes, source):
1549 1563 """Generate a changegroup of all nodes that we have that a recipient
1550 1564 doesn't.
1551 1565
1552 1566 This is much easier than the previous function as we can assume that
1553 1567 the recipient has any changenode we aren't sending them."""
1554 1568
1555 1569 self.hook('preoutgoing', throw=True, source=source)
1556 1570
1557 1571 cl = self.changelog
1558 1572 nodes = cl.nodesbetween(basenodes, None)[0]
1559 1573 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1560 1574
1561 1575 def identity(x):
1562 1576 return x
1563 1577
1564 1578 def gennodelst(revlog):
1565 1579 for r in xrange(0, revlog.count()):
1566 1580 n = revlog.node(r)
1567 1581 if revlog.linkrev(n) in revset:
1568 1582 yield n
1569 1583
1570 1584 def changed_file_collector(changedfileset):
1571 1585 def collect_changed_files(clnode):
1572 1586 c = cl.read(clnode)
1573 1587 for fname in c[3]:
1574 1588 changedfileset[fname] = 1
1575 1589 return collect_changed_files
1576 1590
1577 1591 def lookuprevlink_func(revlog):
1578 1592 def lookuprevlink(n):
1579 1593 return cl.node(revlog.linkrev(n))
1580 1594 return lookuprevlink
1581 1595
1582 1596 def gengroup():
1583 1597 # construct a list of all changed files
1584 1598 changedfiles = {}
1585 1599
1586 1600 for chnk in cl.group(nodes, identity,
1587 1601 changed_file_collector(changedfiles)):
1588 1602 yield chnk
1589 1603 changedfiles = changedfiles.keys()
1590 1604 changedfiles.sort()
1591 1605
1592 1606 mnfst = self.manifest
1593 1607 nodeiter = gennodelst(mnfst)
1594 1608 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1595 1609 yield chnk
1596 1610
1597 1611 for fname in changedfiles:
1598 1612 filerevlog = self.file(fname)
1599 1613 nodeiter = gennodelst(filerevlog)
1600 1614 nodeiter = list(nodeiter)
1601 1615 if nodeiter:
1602 1616 yield changegroup.genchunk(fname)
1603 1617 lookup = lookuprevlink_func(filerevlog)
1604 1618 for chnk in filerevlog.group(nodeiter, lookup):
1605 1619 yield chnk
1606 1620
1607 1621 yield changegroup.closechunk()
1608 1622
1609 1623 if nodes:
1610 1624 self.hook('outgoing', node=hex(nodes[0]), source=source)
1611 1625
1612 1626 return util.chunkbuffer(gengroup())
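# Sketch of the overall stream layout that gengroup() above emits and
# addchangegroup() below walks: one changelog group, one manifest group,
# then a group per changed file, each introduced by a chunk carrying the
# file name, with an empty chunk closing the file list. The framing details
# are the same assumption as in the previous sketch; the names below are
# illustrative only.
import struct

def _read_chunk(read):
    header = read(4)
    if len(header) < 4:
        return b""
    length = struct.unpack(">l", header)[0]
    if length <= 4:
        return b""
    return read(length - 4)

def _read_group(read):
    chunks = []
    while True:
        chunk = _read_chunk(read)
        if not chunk:
            return chunks
        chunks.append(chunk)

def parse_changegroup(read):
    changesets = _read_group(read)   # changelog deltas
    manifests = _read_group(read)    # manifest deltas
    files = {}
    while True:
        fname = _read_chunk(read)    # empty chunk ends the per-file section
        if not fname:
            break
        files[fname] = _read_group(read)
    return changesets, manifests, files

# e.g. parse_changegroup(fp.read) on the stream returned by changegroup()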
1613 1627
1614 1628 def addchangegroup(self, source, srctype, url):
1615 1629 """add changegroup to repo.
1616 1630 returns number of heads modified or added + 1."""
1617 1631
1618 1632 def csmap(x):
1619 1633 self.ui.debug(_("add changeset %s\n") % short(x))
1620 1634 return cl.count()
1621 1635
1622 1636 def revmap(x):
1623 1637 return cl.rev(x)
1624 1638
1625 1639 if not source:
1626 1640 return 0
1627 1641
1628 1642 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1629 1643
1630 1644 changesets = files = revisions = 0
1631 1645
1632 1646 tr = self.transaction()
1633 1647
1634 1648 # write changelog data to temp files so concurrent readers will not see
1635 1649 # inconsistent view
1636 1650 cl = None
1637 1651 try:
1638 1652 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1639 1653
1640 1654 oldheads = len(cl.heads())
1641 1655
1642 1656 # pull off the changeset group
1643 1657 self.ui.status(_("adding changesets\n"))
1644 1658 cor = cl.count() - 1
1645 1659 chunkiter = changegroup.chunkiter(source)
1646 1660 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1647 1661 raise util.Abort(_("received changelog group is empty"))
1648 1662 cnr = cl.count() - 1
1649 1663 changesets = cnr - cor
1650 1664
1651 1665 # pull off the manifest group
1652 1666 self.ui.status(_("adding manifests\n"))
1653 1667 chunkiter = changegroup.chunkiter(source)
1654 1668 # no need to check for empty manifest group here:
1655 1669 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1656 1670 # no new manifest will be created and the manifest group will
1657 1671 # be empty during the pull
1658 1672 self.manifest.addgroup(chunkiter, revmap, tr)
1659 1673
1660 1674 # process the files
1661 1675 self.ui.status(_("adding file changes\n"))
1662 1676 while 1:
1663 1677 f = changegroup.getchunk(source)
1664 1678 if not f:
1665 1679 break
1666 1680 self.ui.debug(_("adding %s revisions\n") % f)
1667 1681 fl = self.file(f)
1668 1682 o = fl.count()
1669 1683 chunkiter = changegroup.chunkiter(source)
1670 1684 if fl.addgroup(chunkiter, revmap, tr) is None:
1671 1685 raise util.Abort(_("received file revlog group is empty"))
1672 1686 revisions += fl.count() - o
1673 1687 files += 1
1674 1688
1675 1689 cl.writedata()
1676 1690 finally:
1677 1691 if cl:
1678 1692 cl.cleanup()
1679 1693
1680 1694 # make changelog see real files again
1681 1695 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1682 1696 self.changelog.checkinlinesize(tr)
1683 1697
1684 1698 newheads = len(self.changelog.heads())
1685 1699 heads = ""
1686 1700 if oldheads and newheads != oldheads:
1687 1701 heads = _(" (%+d heads)") % (newheads - oldheads)
1688 1702
1689 1703 self.ui.status(_("added %d changesets"
1690 1704 " with %d changes to %d files%s\n")
1691 1705 % (changesets, revisions, files, heads))
1692 1706
1693 1707 if changesets > 0:
1694 1708 self.hook('pretxnchangegroup', throw=True,
1695 1709 node=hex(self.changelog.node(cor+1)), source=srctype,
1696 1710 url=url)
1697 1711
1698 1712 tr.close()
1699 1713
1700 1714 if changesets > 0:
1701 1715 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1702 1716 source=srctype, url=url)
1703 1717
1704 1718 for i in range(cor + 1, cnr + 1):
1705 1719 self.hook("incoming", node=hex(self.changelog.node(i)),
1706 1720 source=srctype, url=url)
1707 1721
1708 1722 return newheads - oldheads + 1
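# Hook ordering wired up in addchangegroup() above:
#   prechangegroup     - before any data is read; a failure aborts the operation
#   pretxnchangegroup  - after all groups are added but before the transaction
#                        closes; node= is the first newly added changeset
#   changegroup        - once for the whole group, after the transaction closes
#   incoming           - once per added changeset, after the transaction closes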
1709 1723
1710 1724
1711 1725 def stream_in(self, remote):
1712 1726 fp = remote.stream_out()
1713 1727 resp = int(fp.readline())
1714 1728 if resp != 0:
1715 1729 raise util.Abort(_('operation forbidden by server'))
1716 1730 self.ui.status(_('streaming all changes\n'))
1717 1731 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1718 1732 self.ui.status(_('%d files to transfer, %s of data\n') %
1719 1733 (total_files, util.bytecount(total_bytes)))
1720 1734 start = time.time()
1721 1735 for i in xrange(total_files):
1722 1736 name, size = fp.readline().split('\0', 1)
1723 1737 size = int(size)
1724 1738 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1725 1739 ofp = self.opener(name, 'w')
1726 1740 for chunk in util.filechunkiter(fp, limit=size):
1727 1741 ofp.write(chunk)
1728 1742 ofp.close()
1729 1743 elapsed = time.time() - start
1730 1744 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1731 1745 (util.bytecount(total_bytes), elapsed,
1732 1746 util.bytecount(total_bytes / elapsed)))
1733 1747 self.reload()
1734 1748 return len(self.heads()) + 1
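# Minimal producer for the wire format that stream_in() parses above, useful
# only as an illustration (a real server streams the actual store files):
# a status line ("0" means allowed), then "<file count> <total bytes>", then
# for every file a "<name>\0<size>" line followed by exactly <size> raw bytes.
from io import BytesIO

def make_stream(files):
    """files: dict mapping store-relative name (bytes) -> file contents."""
    out = BytesIO()
    out.write(b"0\n")
    total = sum(len(data) for data in files.values())
    out.write(b"%d %d\n" % (len(files), total))
    for name, data in files.items():
        out.write(name + b"\0" + b"%d\n" % len(data))
        out.write(data)
    out.seek(0)
    return out

# e.g. make_stream({b"00changelog.i": b"\0" * 64}) builds a stream that
# stream_in() would parse as a single 64-byte file.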
1735 1749
1736 1750 def clone(self, remote, heads=[], stream=False):
1737 1751 '''clone remote repository.
1738 1752
1739 1753 keyword arguments:
1740 1754 heads: list of revs to clone (forces use of pull)
1741 1755 stream: use streaming clone if possible'''
1742 1756
1743 1757 # now, all clients that can request uncompressed clones can
1744 1758 # read repo formats supported by all servers that can serve
1745 1759 # them.
1746 1760
1747 1761 # if revlog format changes, client will have to check version
1748 1762 # and format flags on "stream" capability, and use
1749 1763 # uncompressed only if compatible.
1750 1764
1751 1765 if stream and not heads and remote.capable('stream'):
1752 1766 return self.stream_in(remote)
1753 1767 return self.pull(remote, heads)
1754 1768
1755 1769 # used to avoid circular references so destructors work
1756 1770 def aftertrans(base):
1757 1771 p = base
1758 1772 def a():
1759 1773 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1760 1774 util.rename(os.path.join(p, "journal.dirstate"),
1761 1775 os.path.join(p, "undo.dirstate"))
1762 1776 return a
1763 1777
1764 1778 def instance(ui, path, create):
1765 1779 return localrepository(ui, util.drop_scheme('file', path), create)
1766 1780
1767 1781 def islocal(path):
1768 1782 return True
@@ -1,463 +1,465 b''
1 1 # merge.py - directory-level update/merge handling for Mercurial
2 2 #
3 3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 demandload(globals(), "errno util os tempfile")
12 12
13 13 def filemerge(repo, fw, fo, fd, my, other, p1, p2, move):
14 14 """perform a 3-way merge in the working directory
15 15
16 16 fw = filename in the working directory and first parent
17 17 fo = filename in other parent
18 18 fd = destination filename
19 19 my = fileid in first parent
20 20 other = fileid in second parent
21 21 p1, p2 = hex changeset ids for merge command
22 22 move = whether to move or copy the file to the destination
23 23
24 24 TODO:
25 25 if fw is copied in the working directory, we get confused
26 26 implement move and fd
27 27 """
28 28
29 29 def temp(prefix, ctx):
30 30 pre = "%s~%s." % (os.path.basename(ctx.path()), prefix)
31 31 (fd, name) = tempfile.mkstemp(prefix=pre)
32 32 f = os.fdopen(fd, "wb")
33 33 repo.wwrite(ctx.path(), ctx.data(), f)
34 34 f.close()
35 35 return name
36 36
37 37 fcm = repo.filectx(fw, fileid=my)
38 38 fco = repo.filectx(fo, fileid=other)
39 39 fca = fcm.ancestor(fco)
40 40 if not fca:
41 41 fca = repo.filectx(fw, fileid=-1)
42 42 a = repo.wjoin(fw)
43 43 b = temp("base", fca)
44 44 c = temp("other", fco)
45 45
46 46 repo.ui.note(_("resolving %s\n") % fw)
47 47 repo.ui.debug(_("my %s other %s ancestor %s\n") % (fcm, fco, fca))
48 48
49 49 cmd = (os.environ.get("HGMERGE") or repo.ui.config("ui", "merge")
50 50 or "hgmerge")
51 51 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=repo.root,
52 52 environ={'HG_FILE': fw,
53 53 'HG_MY_NODE': p1,
54 54 'HG_OTHER_NODE': p2})
55 55 if r:
56 56 repo.ui.warn(_("merging %s failed!\n") % fw)
57 57 else:
58 58 if fd != fw:
59 59 repo.ui.debug(_("copying %s to %s\n") % (fw, fd))
60 60 repo.wwrite(fd, repo.wread(fw))
61 61 if move:
62 62 repo.ui.debug(_("removing %s\n") % fw)
63 63 os.unlink(a)
64 64
65 65 os.unlink(b)
66 66 os.unlink(c)
67 67 return r
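# Standalone sketch of the contract filemerge() uses above: the tool named by
# HGMERGE wins over the [ui] merge setting, which wins over the default
# "hgmerge"; the tool receives the local, base and other files and signals
# conflicts with a non-zero exit status, writing its result back into the
# local file. subprocess is used here only for illustration; the code above
# goes through util.system with HG_FILE/HG_MY_NODE/HG_OTHER_NODE set.
import os
import subprocess

def pick_merge_tool(configured=None):
    return os.environ.get("HGMERGE") or configured or "hgmerge"

def run_merge_tool(tool, local, base, other, extra_env=None):
    env = dict(os.environ)
    env.update(extra_env or {})
    return subprocess.call([tool, local, base, other], env=env)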
68 68
69 69 def checkunknown(repo, m2, wctx):
70 70 """
71 71 check for collisions between unknown files and files in m2
72 72 """
73 73 for f in wctx.unknown():
74 74 if f in m2:
75 75 if repo.file(f).cmp(m2[f], repo.wread(f)):
76 76 raise util.Abort(_("'%s' already exists in the working"
77 77 " dir and differs from remote") % f)
78 78
79 79 def forgetremoved(m2, wctx):
80 80 """
81 81 Forget removed files
82 82
83 83 If we're jumping between revisions (as opposed to merging), and if
84 84 neither the working directory nor the target rev has the file,
85 85 then we need to remove it from the dirstate, to prevent the
86 86 dirstate from listing the file when it is no longer in the
87 87 manifest.
88 88 """
89 89
90 90 action = []
91 91
92 92 for f in wctx.deleted() + wctx.removed():
93 93 if f not in m2:
94 94 action.append((f, "f"))
95 95
96 96 return action
97 97
98 98 def nonoverlap(d1, d2):
99 99 """
100 100 Return list of elements in d1 not in d2
101 101 """
102 102
103 103 l = []
104 104 for d in d1:
105 105 if d not in d2:
106 106 l.append(d)
107 107
108 108 l.sort()
109 109 return l
110 110
111 111 def findold(fctx, limit):
112 112 """
113 113 find files that path was copied from, back to linkrev limit
114 114 """
115 115
116 116 old = {}
117 117 orig = fctx.path()
118 118 visit = [fctx]
119 119 while visit:
120 120 fc = visit.pop()
121 121 if fc.rev() < limit:
122 122 continue
123 123 if fc.path() != orig and fc.path() not in old:
124 124 old[fc.path()] = 1
125 125 visit += fc.parents()
126 126
127 127 old = old.keys()
128 128 old.sort()
129 129 return old
130 130
131 131 def findcopies(repo, m1, m2, limit):
132 132 """
133 133 Find moves and copies between m1 and m2 back to limit linkrev
134 134 """
135 135
136 136 if not repo.ui.config("merge", "followcopies"):
137 137 return {}
138 138
139 139 # avoid silly behavior for update from empty dir
140 140 if not m1:
141 141 return {}
142 142
143 143 dcopies = repo.dirstate.copies()
144 144 copy = {}
145 145 match = {}
146 146 u1 = nonoverlap(m1, m2)
147 147 u2 = nonoverlap(m2, m1)
148 148 ctx = util.cachefunc(lambda f,n: repo.filectx(f, fileid=n[:20]))
149 149
150 150 def checkpair(c, f2, man):
151 151 ''' check if an apparent pair actually matches '''
152 152 c2 = ctx(f2, man[f2])
153 153 ca = c.ancestor(c2)
154 154         if ca and (ca.path() == c.path() or ca.path() == c2.path()):
155 155 copy[c.path()] = f2
156 156 copy[f2] = c.path()
157 157
158 158 for f in u1:
159 159 c = ctx(dcopies.get(f, f), m1[f])
160 160 for of in findold(c, limit):
161 161 if of in m2:
162 162 checkpair(c, of, m2)
163 163 else:
164 164 match.setdefault(of, []).append(f)
165 165
166 166 for f in u2:
167 167 c = ctx(f, m2[f])
168 168 for of in findold(c, limit):
169 169 if of in m1:
170 170 checkpair(c, of, m1)
171 171 elif of in match:
172 172 for mf in match[of]:
173 173 checkpair(c, mf, m1)
174 174
175 175 return copy
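# In outline, the copy search above works like this: for every file present
# on only one side (nonoverlap), walk its filelog history back to the common
# ancestor's linkrev (findold) to collect earlier names; when an earlier name
# exists in the other manifest, checkpair() confirms the pair by requiring a
# file ancestor whose path matches one of the two, and records it in `copy`
# in both directions. The `match` dict handles the divergent case where both
# sides renamed the same source file.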
176 176
177 177 def manifestmerge(ui, m1, m2, ma, copy, overwrite, backwards, partial):
178 178 """
179 179 Merge manifest m1 with m2 using ancestor ma and generate merge action list
180 180 """
181 181
182 182 def fmerge(f, f2=None, fa=None):
183 183 """merge executable flags"""
184 184 if not f2:
185 185 f2 = f
186 186 fa = f
187 187 a, b, c = ma.execf(fa), m1.execf(f), m2.execf(f2)
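        # With boolean flags this picks whichever side changed the flag
        # relative to the ancestor a; if both sides changed it, they agree:
        #   a b c -> result        a b c -> result
        #   0 0 0    0             1 1 1    1
        #   0 1 0    1             1 0 1    0
        #   0 0 1    1             1 1 0    0
        #   0 1 1    1             1 0 0    0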
188 188 return ((a^b) | (a^c)) ^ a
189 189
190 190 action = []
191 191
192 192 def act(msg, f, m, *args):
193 193 ui.debug(" %s: %s -> %s\n" % (f, msg, m))
194 194 action.append((f, m) + args)
195 195
196 196 # Compare manifests
197 197 for f, n in m1.iteritems():
198 198 if partial and not partial(f):
199 199 continue
200 200 if f in m2:
201 201 # are files different?
202 202 if n != m2[f]:
203 203 a = ma.get(f, nullid)
204 204 # are both different from the ancestor?
205 205 if not overwrite and n != a and m2[f] != a:
206 206 act("versions differ", f, "m", fmerge(f), n[:20], m2[f])
207 207 # are we clobbering?
208 208 # is remote's version newer?
209 209 # or are we going back in time and clean?
210 210 elif overwrite or m2[f] != a or (backwards and not n[20:]):
211 211 act("remote is newer", f, "g", m2.execf(f), m2[f])
212 212 # local is newer, not overwrite, check mode bits
213 213 elif fmerge(f) != m1.execf(f):
214 214 act("update permissions", f, "e", m2.execf(f))
215 215 # contents same, check mode bits
216 216 elif m1.execf(f) != m2.execf(f):
217 217 if overwrite or fmerge(f) != m1.execf(f):
218 218 act("update permissions", f, "e", m2.execf(f))
219 219 elif f in copy:
220 220 f2 = copy[f]
221 221 if f in ma: # case 3,20 A/B/A
222 222 act("remote moved",
223 223 f, "c", f2, f2, m1[f], m2[f2], fmerge(f, f2, f), True)
224 224 else:
225 225 if f2 in m1: # case 2 A,B/B/B
226 226 act("local copied",
227 227 f, "c", f2, f, m1[f], m2[f2], fmerge(f, f2, f2), False)
228 228 else: # case 4,21 A/B/B
229 229 act("local moved",
230 230 f, "c", f2, f, m1[f], m2[f2], fmerge(f, f2, f2), False)
231 231 elif f in ma:
232 232 if n != ma[f] and not overwrite:
233 233 if ui.prompt(
234 234 (_(" local changed %s which remote deleted\n") % f) +
235 235 _("(k)eep or (d)elete?"), _("[kd]"), _("k")) == _("d"):
236 236 act("prompt delete", f, "r")
237 237 else:
238 238 act("other deleted", f, "r")
239 239 else:
240 240 # file is created on branch or in working directory
241 241 if (overwrite and n[20:] != "u") or (backwards and not n[20:]):
242 242 act("remote deleted", f, "r")
243 243
244 244 for f, n in m2.iteritems():
245 245 if partial and not partial(f):
246 246 continue
247 247 if f in m1:
248 248 continue
249 249 if f in copy:
250 250 f2 = copy[f]
251 251 if f2 not in m2: # already seen
252 252 continue
253 253 # rename case 1, A/A,B/A
254 254 act("remote copied",
255 255 f2, "c", f, f, m1[f2], m2[f], fmerge(f2, f, f2), False)
256 256 elif f in ma:
257 257 if overwrite or backwards:
258 258 act("recreating", f, "g", m2.execf(f), n)
259 259 elif n != ma[f]:
260 260 if ui.prompt(
261 261 (_("remote changed %s which local deleted\n") % f) +
262 262 _("(k)eep or (d)elete?"), _("[kd]"), _("k")) == _("k"):
263 263 act("prompt recreating", f, "g", m2.execf(f), n)
264 264 else:
265 265 act("remote created", f, "g", m2.execf(f), n)
266 266
267 267 return action
268 268
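# The single-letter action codes built by manifestmerge() above, with the
# tuple payloads applyupdates() and recordupdates() below unpack:
#   "r"  remove file        (f, "r")
#   "f"  forget in dirstate (f, "f")
#   "g"  get remote version (f, "g", execflag, node)
#   "e"  set exec bit       (f, "e", execflag)
#   "m"  three-way merge    (f, "m", execflag, mynode, othernode)
#   "c"  copy/move merge    (f, "c", f2, fd, mynode, othernode, execflag, move)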
269 269 def applyupdates(repo, action, xp1, xp2):
270 270 updated, merged, removed, unresolved = 0, 0, 0, 0
271 271 action.sort()
272 272 for a in action:
273 273 f, m = a[:2]
274 274 if f[0] == "/":
275 275 continue
276 276 if m == "r": # remove
277 277 repo.ui.note(_("removing %s\n") % f)
278 278 util.audit_path(f)
279 279 try:
280 280 util.unlink(repo.wjoin(f))
281 281 except OSError, inst:
282 282 if inst.errno != errno.ENOENT:
283 283 repo.ui.warn(_("update failed to remove %s: %s!\n") %
284 284 (f, inst.strerror))
285 285 removed +=1
286 286 elif m == "c": # copy
287 287 f2, fd, my, other, flag, move = a[2:]
288 288 repo.ui.status(_("merging %s and %s to %s\n") % (f, f2, fd))
289 289 if filemerge(repo, f, f2, fd, my, other, xp1, xp2, move):
290 290 unresolved += 1
291 291 util.set_exec(repo.wjoin(fd), flag)
292 292 merged += 1
293 293 elif m == "m": # merge
294 294 flag, my, other = a[2:]
295 295 repo.ui.status(_("merging %s\n") % f)
296 296 if filemerge(repo, f, f, f, my, other, xp1, xp2, False):
297 297 unresolved += 1
298 298 util.set_exec(repo.wjoin(f), flag)
299 299 merged += 1
300 300 elif m == "g": # get
301 301 flag, node = a[2:]
302 302 repo.ui.note(_("getting %s\n") % f)
303 303 t = repo.file(f).read(node)
304 304 repo.wwrite(f, t)
305 305 util.set_exec(repo.wjoin(f), flag)
306 306 updated += 1
307 307 elif m == "e": # exec
308 308 flag = a[2:]
309 309 util.set_exec(repo.wjoin(f), flag)
310 310
311 311 return updated, merged, removed, unresolved
312 312
313 313 def recordupdates(repo, action, branchmerge):
314 314 for a in action:
315 315 f, m = a[:2]
316 316 if m == "r": # remove
317 317 if branchmerge:
318 318 repo.dirstate.update([f], 'r')
319 319 else:
320 320 repo.dirstate.forget([f])
321 321 elif m == "f": # forget
322 322 repo.dirstate.forget([f])
323 323 elif m == "g": # get
324 324 if branchmerge:
325 325 repo.dirstate.update([f], 'n', st_mtime=-1)
326 326 else:
327 327 repo.dirstate.update([f], 'n')
328 328 elif m == "m": # merge
329 329 flag, my, other = a[2:]
330 330 if branchmerge:
331 331 # We've done a branch merge, mark this file as merged
332 332 # so that we properly record the merger later
333 333 repo.dirstate.update([f], 'm')
334 334 else:
335 335 # We've update-merged a locally modified file, so
336 336 # we set the dirstate to emulate a normal checkout
337 337 # of that file some time in the past. Thus our
338 338 # merge will appear as a normal local file
339 339 # modification.
340 340 fl = repo.file(f)
341 341 f_len = fl.size(fl.rev(other))
342 342 repo.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
343 343 elif m == "c": # copy
344 344 f2, fd, my, other, flag, move = a[2:]
345 345 if branchmerge:
346 346 # We've done a branch merge, mark this file as merged
347 347 # so that we properly record the merger later
348 348 repo.dirstate.update([fd], 'm')
349 349 else:
350 350 # We've update-merged a locally modified file, so
351 351 # we set the dirstate to emulate a normal checkout
352 352 # of that file some time in the past. Thus our
353 353 # merge will appear as a normal local file
354 354 # modification.
355 355 fl = repo.file(f)
356 356 f_len = fl.size(fl.rev(other))
357 357 repo.dirstate.update([fd], 'n', st_size=f_len, st_mtime=-1)
358 358 if move:
359 359 repo.dirstate.update([f], 'r')
360 360 if f != fd:
361 361 repo.dirstate.copy(f, fd)
362 else:
363 repo.dirstate.copy(f2, fd)
362 364
363 365 def update(repo, node, branchmerge=False, force=False, partial=None,
364 366 wlock=None, show_stats=True, remind=True):
365 367
366 368 overwrite = force and not branchmerge
367 369 forcemerge = force and branchmerge
368 370
369 371 if not wlock:
370 372 wlock = repo.wlock()
371 373
372 374 ### check phase
373 375
374 376 wc = repo.workingctx()
375 377 pl = wc.parents()
376 378 if not overwrite and len(pl) > 1:
377 379 raise util.Abort(_("outstanding uncommitted merges"))
378 380
379 381 p1, p2 = pl[0], repo.changectx(node)
380 382 pa = p1.ancestor(p2)
381 383
382 384 # are we going backwards?
383 385 backwards = (pa == p2)
384 386
385 387 # is there a linear path from p1 to p2?
386 388 if pa == p1 or pa == p2:
387 389 if branchmerge:
388 390 raise util.Abort(_("there is nothing to merge, just use "
389 391 "'hg update' or look at 'hg heads'"))
390 392 elif not (overwrite or branchmerge):
391 393 raise util.Abort(_("update spans branches, use 'hg merge' "
392 394 "or 'hg update -C' to lose changes"))
393 395
394 396 if branchmerge and not forcemerge:
395 397 if wc.modified() or wc.added() or wc.removed():
396 398 raise util.Abort(_("outstanding uncommitted changes"))
397 399
398 400 m1 = wc.manifest()
399 401 m2 = p2.manifest()
400 402 ma = pa.manifest()
401 403
402 404 # resolve the manifest to determine which files
403 405 # we care about merging
404 406 repo.ui.note(_("resolving manifests\n"))
405 407 repo.ui.debug(_(" overwrite %s branchmerge %s partial %s\n") %
406 408 (overwrite, branchmerge, bool(partial)))
407 409 repo.ui.debug(_(" ancestor %s local %s remote %s\n") % (p1, p2, pa))
408 410
409 411 action = []
410 412 copy = {}
411 413
412 414 if not force:
413 415 checkunknown(repo, m2, wc)
414 416 if not branchmerge:
415 417 action += forgetremoved(m2, wc)
416 418 if not (backwards or overwrite):
417 419 copy = findcopies(repo, m1, m2, pa.rev())
418 420
419 421 action += manifestmerge(repo.ui, m1, m2, ma, copy,
420 422 overwrite, backwards, partial)
421 423
422 424 ### apply phase
423 425
424 426 if not branchmerge:
425 427 # we don't need to do any magic, just jump to the new rev
426 428 p1, p2 = p2, repo.changectx(nullid)
427 429
428 430 xp1, xp2 = str(p1), str(p2)
429 431 if not p2: xp2 = ''
430 432
431 433 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
432 434
433 435 updated, merged, removed, unresolved = applyupdates(repo, action, xp1, xp2)
434 436
435 437 # update dirstate
436 438 if not partial:
437 439 recordupdates(repo, action, branchmerge)
438 440 repo.dirstate.setparents(p1.node(), p2.node())
439 441
440 442 if show_stats:
441 443 stats = ((updated, _("updated")),
442 444 (merged - unresolved, _("merged")),
443 445 (removed, _("removed")),
444 446 (unresolved, _("unresolved")))
445 447 note = ", ".join([_("%d files %s") % s for s in stats])
446 448 repo.ui.status("%s\n" % note)
447 449 if not partial:
448 450 if branchmerge:
449 451 if unresolved:
450 452 repo.ui.status(_("There are unresolved merges,"
451 453 " you can redo the full merge using:\n"
452 454 " hg update -C %s\n"
453 455 " hg merge %s\n"
454 456 % (p1.rev(), p2.rev())))
455 457 elif remind:
456 458 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
457 459 elif unresolved:
458 460 repo.ui.status(_("There are unresolved merges with"
459 461 " locally modified files.\n"))
460 462
461 463 repo.hook('update', parent1=xp1, parent2=xp2, error=unresolved)
462 464 return unresolved
463 465
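# Reading the flags in update() above: overwrite (= force and not branchmerge)
# clobbers the working copy and jumps straight to the target revision;
# branchmerge merges the target into the working directory and records two
# parents; backwards (pa == p2) means the target is an ancestor of the working
# parent, so clean files simply follow the older revision. As rough
# orientation only, since the command layer is not part of this diff: a plain
# "hg update" maps to update(repo, node), "hg update -C" adds force=True, and
# "hg merge" sets branchmerge=True.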
@@ -1,25 +1,27 b''
1 1 #!/bin/sh
2 2
3 3 mkdir t
4 4 cd t
5 5 hg init
6 6 echo "[merge]" >> .hg/hgrc
7 7 echo "followcopies = 1" >> .hg/hgrc
8 8 echo foo > a
9 9 echo foo > a2
10 10 hg add a a2
11 11 hg ci -m "start" -d "0 0"
12 12 hg mv a b
13 13 hg mv a2 b2
14 14 hg ci -m "rename" -d "0 0"
15 15 echo "checkout"
16 16 hg co 0
17 17 echo blahblah > a
18 18 echo blahblah > a2
19 19 hg mv a2 c2
20 20 hg ci -m "modify" -d "0 0"
21 21 echo "merge"
22 22 hg merge -y --debug
23 23 hg status -AC
24 24 cat b
25 25 hg ci -m "merge" -d "0 0"
26 hg debugindex .hg/data/b.i
27 hg debugrename b
\ No newline at end of file
@@ -1,22 +1,26 b''
1 1 checkout
2 2 2 files updated, 0 files merged, 2 files removed, 0 files unresolved
3 3 merge
4 4 resolving manifests
5 5 overwrite None branchmerge True partial False
6 6 ancestor f26ec4fc3fa3 local 8e765a822af2 remote af1939970a1c
7 7 a: remote moved -> c
8 8 b2: remote created -> g
9 9 merging a and b to b
10 10 resolving a
11 11 my a@f26ec4fc3fa3 other b@8e765a822af2 ancestor a@af1939970a1c
12 12 copying a to b
13 13 removing a
14 14 getting b2
15 15 1 files updated, 1 files merged, 0 files removed, 0 files unresolved
16 16 (branch merge, don't forget to commit)
17 17 M b
18 18 a
19 19 M b2
20 20 R a
21 21 C c2
22 22 blahblah
23 rev offset length base linkrev nodeid p1 p2
24 0 0 67 0 1 dc51707dfc98 000000000000 000000000000
25 1 67 72 1 3 b2494a44f0a9 000000000000 dc51707dfc98
26 renamed from a:dd03b83622e78778b403775d0d074b9ac7387a66
@@ -1,457 +1,463 b''
1 1 --------------
2 2 test L:up a R:nc a b W: - 1 get local a to b
3 3 --------------
4 4 resolving manifests
5 5 overwrite None branchmerge True partial False
6 6 ancestor e300d1c794ec local 735846fee2d7 remote 924404dff337
7 7 rev: versions differ -> m
8 8 a: remote copied -> c
9 9 merging a and b to b
10 10 resolving a
11 11 my a@e300d1c794ec other b@735846fee2d7 ancestor a@924404dff337
12 12 copying a to b
13 13 merging rev
14 14 resolving rev
15 15 my rev@e300d1c794ec other rev@735846fee2d7 ancestor rev@924404dff337
16 16 0 files updated, 2 files merged, 0 files removed, 0 files unresolved
17 17 (branch merge, don't forget to commit)
18 18 --------------
19 19 M a
20 20 M b
21 21 a
22 22 --------------
23 23
24 24 --------------
25 25 test L:nc a b R:up a W: - 2 get rem change to a and b
26 26 --------------
27 27 resolving manifests
28 28 overwrite None branchmerge True partial False
29 29 ancestor ac809aeed39a local f4db7e329e71 remote 924404dff337
30 30 a: remote is newer -> g
31 31 b: local copied -> c
32 32 rev: versions differ -> m
33 33 getting a
34 34 merging b and a to b
35 35 resolving b
36 36 my b@ac809aeed39a other a@f4db7e329e71 ancestor a@924404dff337
37 37 merging rev
38 38 resolving rev
39 39 my rev@ac809aeed39a other rev@f4db7e329e71 ancestor rev@924404dff337
40 40 1 files updated, 2 files merged, 0 files removed, 0 files unresolved
41 41 (branch merge, don't forget to commit)
42 42 --------------
43 43 M a
44 44 M b
45 a
45 46 --------------
46 47
47 48 --------------
48 49 test L:up a R:nm a b W: - 3 get local a change to b, remove a
49 50 --------------
50 51 resolving manifests
51 52 overwrite None branchmerge True partial False
52 53 ancestor e300d1c794ec local e03727d2d66b remote 924404dff337
53 54 a: remote moved -> c
54 55 rev: versions differ -> m
55 56 merging a and b to b
56 57 resolving a
57 58 my a@e300d1c794ec other b@e03727d2d66b ancestor a@924404dff337
58 59 copying a to b
59 60 removing a
60 61 merging rev
61 62 resolving rev
62 63 my rev@e300d1c794ec other rev@e03727d2d66b ancestor rev@924404dff337
63 64 0 files updated, 2 files merged, 0 files removed, 0 files unresolved
64 65 (branch merge, don't forget to commit)
65 66 --------------
66 67 M b
67 68 a
68 69 --------------
69 70
70 71 --------------
71 72 test L:nm a b R:up a W: - 4 get remote change to b
72 73 --------------
73 74 resolving manifests
74 75 overwrite None branchmerge True partial False
75 76 ancestor ecf3cb2a4219 local f4db7e329e71 remote 924404dff337
76 77 b: local moved -> c
77 78 rev: versions differ -> m
78 79 merging b and a to b
79 80 resolving b
80 81 my b@ecf3cb2a4219 other a@f4db7e329e71 ancestor a@924404dff337
81 82 merging rev
82 83 resolving rev
83 84 my rev@ecf3cb2a4219 other rev@f4db7e329e71 ancestor rev@924404dff337
84 85 0 files updated, 2 files merged, 0 files removed, 0 files unresolved
85 86 (branch merge, don't forget to commit)
86 87 --------------
87 88 M b
89 a
88 90 --------------
89 91
90 92 --------------
91 93 test L: R:nc a b W: - 5 get b
92 94 --------------
93 95 resolving manifests
94 96 overwrite None branchmerge True partial False
95 97 ancestor 94b33a1b7f2d local 735846fee2d7 remote 924404dff337
96 98 rev: versions differ -> m
97 99 a: remote copied -> c
98 100 merging a and b to b
99 101 resolving a
100 102 my a@924404dff337 other b@735846fee2d7 ancestor a@924404dff337
101 103 copying a to b
102 104 merging rev
103 105 resolving rev
104 106 my rev@94b33a1b7f2d other rev@735846fee2d7 ancestor rev@924404dff337
105 107 0 files updated, 2 files merged, 0 files removed, 0 files unresolved
106 108 (branch merge, don't forget to commit)
107 109 --------------
108 110 M a
109 111 M b
110 112 a
111 113 --------------
112 114
113 115 --------------
114 116 test L:nc a b R: W: - 6 nothing
115 117 --------------
116 118 resolving manifests
117 119 overwrite None branchmerge True partial False
118 120 ancestor ac809aeed39a local 97c705ade336 remote 924404dff337
119 121 b: local copied -> c
120 122 rev: versions differ -> m
121 123 merging b and a to b
122 124 resolving b
123 125 my b@ac809aeed39a other a@924404dff337 ancestor a@924404dff337
124 126 merging rev
125 127 resolving rev
126 128 my rev@ac809aeed39a other rev@97c705ade336 ancestor rev@924404dff337
127 129 0 files updated, 2 files merged, 0 files removed, 0 files unresolved
128 130 (branch merge, don't forget to commit)
129 131 --------------
130 132 M b
133 a
131 134 C a
132 135 --------------
133 136
134 137 --------------
135 138 test L: R:nm a b W: - 7 get b
136 139 --------------
137 140 resolving manifests
138 141 overwrite None branchmerge True partial False
139 142 ancestor 94b33a1b7f2d local e03727d2d66b remote 924404dff337
140 143 a: remote moved -> c
141 144 rev: versions differ -> m
142 145 merging a and b to b
143 146 resolving a
144 147 my a@924404dff337 other b@e03727d2d66b ancestor a@924404dff337
145 148 copying a to b
146 149 removing a
147 150 merging rev
148 151 resolving rev
149 152 my rev@94b33a1b7f2d other rev@e03727d2d66b ancestor rev@924404dff337
150 153 0 files updated, 2 files merged, 0 files removed, 0 files unresolved
151 154 (branch merge, don't forget to commit)
152 155 --------------
153 156 M b
154 157 a
155 158 --------------
156 159
157 160 --------------
158 161 test L:nm a b R: W: - 8 nothing
159 162 --------------
160 163 resolving manifests
161 164 overwrite None branchmerge True partial False
162 165 ancestor ecf3cb2a4219 local 97c705ade336 remote 924404dff337
163 166 b: local moved -> c
164 167 rev: versions differ -> m
165 168 merging b and a to b
166 169 resolving b
167 170 my b@ecf3cb2a4219 other a@924404dff337 ancestor a@924404dff337
168 171 merging rev
169 172 resolving rev
170 173 my rev@ecf3cb2a4219 other rev@97c705ade336 ancestor rev@924404dff337
171 174 0 files updated, 2 files merged, 0 files removed, 0 files unresolved
172 175 (branch merge, don't forget to commit)
173 176 --------------
174 177 M b
178 a
175 179 --------------
176 180
177 181 --------------
178 182 test L:um a b R:um a b W: - 9 do merge with ancestor in a
179 183 --------------
180 184 resolving manifests
181 185 overwrite None branchmerge True partial False
182 186 ancestor ec03c2ca8642 local 79cc6877a3b7 remote 924404dff337
183 187 b: versions differ -> m
184 188 rev: versions differ -> m
185 189 merging b
186 190 resolving b
187 191 my b@ec03c2ca8642 other b@79cc6877a3b7 ancestor a@924404dff337
188 192 merging rev
189 193 resolving rev
190 194 my rev@ec03c2ca8642 other rev@79cc6877a3b7 ancestor rev@924404dff337
191 195 0 files updated, 2 files merged, 0 files removed, 0 files unresolved
192 196 (branch merge, don't forget to commit)
193 197 --------------
194 198 M b
195 199 --------------
196 200
197 201 --------------
198 202 test L:nm a b R:nm a c W: - 11 get c, keep b
199 203 --------------
200 204 resolving manifests
201 205 overwrite None branchmerge True partial False
202 206 ancestor ecf3cb2a4219 local e6abcc1a30c2 remote 924404dff337
203 207 rev: versions differ -> m
204 208 c: remote created -> g
205 209 getting c
206 210 merging rev
207 211 resolving rev
208 212 my rev@ecf3cb2a4219 other rev@e6abcc1a30c2 ancestor rev@924404dff337
209 213 1 files updated, 1 files merged, 0 files removed, 0 files unresolved
210 214 (branch merge, don't forget to commit)
211 215 --------------
212 216 M c
213 217 C b
214 218 --------------
215 219
216 220 --------------
217 221 test L:nc a b R:up b W: - 12 merge b no ancestor
218 222 --------------
219 223 resolving manifests
220 224 overwrite None branchmerge True partial False
221 225 ancestor ac809aeed39a local af30c7647fc7 remote 924404dff337
222 226 b: versions differ -> m
223 227 rev: versions differ -> m
224 228 merging b
225 229 resolving b
226 230 my b@ac809aeed39a other b@af30c7647fc7 ancestor b@000000000000
227 231 merging rev
228 232 resolving rev
229 233 my rev@ac809aeed39a other rev@af30c7647fc7 ancestor rev@924404dff337
230 234 0 files updated, 2 files merged, 0 files removed, 0 files unresolved
231 235 (branch merge, don't forget to commit)
232 236 --------------
233 237 M b
234 238 C a
235 239 --------------
236 240
237 241 --------------
238 242 test L:up b R:nm a b W: - 13 merge b no ancestor
239 243 --------------
240 244 resolving manifests
241 245 overwrite None branchmerge True partial False
242 246 ancestor 59318016310c local e03727d2d66b remote 924404dff337
243 247 a: other deleted -> r
244 248 b: versions differ -> m
245 249 rev: versions differ -> m
246 250 removing a
247 251 merging b
248 252 resolving b
249 253 my b@59318016310c other b@e03727d2d66b ancestor b@000000000000
250 254 merging rev
251 255 resolving rev
252 256 my rev@59318016310c other rev@e03727d2d66b ancestor rev@924404dff337
253 257 0 files updated, 2 files merged, 1 files removed, 0 files unresolved
254 258 (branch merge, don't forget to commit)
255 259 --------------
256 260 M b
257 261 --------------
258 262
259 263 --------------
260 264 test L:nc a b R:up a b W: - 14 merge b no ancestor
261 265 --------------
262 266 resolving manifests
263 267 overwrite None branchmerge True partial False
264 268 ancestor ac809aeed39a local 8dbce441892a remote 924404dff337
265 269 a: remote is newer -> g
266 270 b: versions differ -> m
267 271 rev: versions differ -> m
268 272 getting a
269 273 merging b
270 274 resolving b
271 275 my b@ac809aeed39a other b@8dbce441892a ancestor b@000000000000
272 276 merging rev
273 277 resolving rev
274 278 my rev@ac809aeed39a other rev@8dbce441892a ancestor rev@924404dff337
275 279 1 files updated, 2 files merged, 0 files removed, 0 files unresolved
276 280 (branch merge, don't forget to commit)
277 281 --------------
278 282 M a
279 283 M b
280 284 --------------
281 285
282 286 --------------
283 287 test L:up b R:nm a b W: - 15 merge b no ancestor, remove a
284 288 --------------
285 289 resolving manifests
286 290 overwrite None branchmerge True partial False
287 291 ancestor 59318016310c local e03727d2d66b remote 924404dff337
288 292 a: other deleted -> r
289 293 b: versions differ -> m
290 294 rev: versions differ -> m
291 295 removing a
292 296 merging b
293 297 resolving b
294 298 my b@59318016310c other b@e03727d2d66b ancestor b@000000000000
295 299 merging rev
296 300 resolving rev
297 301 my rev@59318016310c other rev@e03727d2d66b ancestor rev@924404dff337
298 302 0 files updated, 2 files merged, 1 files removed, 0 files unresolved
299 303 (branch merge, don't forget to commit)
300 304 --------------
301 305 M b
302 306 --------------
303 307
304 308 --------------
305 309 test L:nc a b R:up a b W: - 16 get a, merge b no ancestor
306 310 --------------
307 311 resolving manifests
308 312 overwrite None branchmerge True partial False
309 313 ancestor ac809aeed39a local 8dbce441892a remote 924404dff337
310 314 a: remote is newer -> g
311 315 b: versions differ -> m
312 316 rev: versions differ -> m
313 317 getting a
314 318 merging b
315 319 resolving b
316 320 my b@ac809aeed39a other b@8dbce441892a ancestor b@000000000000
317 321 merging rev
318 322 resolving rev
319 323 my rev@ac809aeed39a other rev@8dbce441892a ancestor rev@924404dff337
320 324 1 files updated, 2 files merged, 0 files removed, 0 files unresolved
321 325 (branch merge, don't forget to commit)
322 326 --------------
323 327 M a
324 328 M b
325 329 --------------
326 330
327 331 --------------
328 332 test L:up a b R:nc a b W: - 17 keep a, merge b no ancestor
329 333 --------------
330 334 resolving manifests
331 335 overwrite None branchmerge True partial False
332 336 ancestor 0b76e65c8289 local 735846fee2d7 remote 924404dff337
333 337 b: versions differ -> m
334 338 rev: versions differ -> m
335 339 merging b
336 340 resolving b
337 341 my b@0b76e65c8289 other b@735846fee2d7 ancestor b@000000000000
338 342 merging rev
339 343 resolving rev
340 344 my rev@0b76e65c8289 other rev@735846fee2d7 ancestor rev@924404dff337
341 345 0 files updated, 2 files merged, 0 files removed, 0 files unresolved
342 346 (branch merge, don't forget to commit)
343 347 --------------
344 348 M b
345 349 C a
346 350 --------------
347 351
348 352 --------------
349 353 test L:nm a b R:up a b W: - 18 merge b no ancestor
350 354 --------------
351 355 resolving manifests
352 356 overwrite None branchmerge True partial False
353 357 ancestor ecf3cb2a4219 local 8dbce441892a remote 924404dff337
354 358 b: versions differ -> m
355 359 rev: versions differ -> m
356 360 a: prompt recreating -> g
357 361 getting a
358 362 merging b
359 363 resolving b
360 364 my b@ecf3cb2a4219 other b@8dbce441892a ancestor b@000000000000
361 365 merging rev
362 366 resolving rev
363 367 my rev@ecf3cb2a4219 other rev@8dbce441892a ancestor rev@924404dff337
364 368 1 files updated, 2 files merged, 0 files removed, 0 files unresolved
365 369 (branch merge, don't forget to commit)
366 370 --------------
367 371 M a
368 372 M b
369 373 --------------
370 374
371 375 --------------
372 376 test L:up a b R:nm a b W: - 19 merge b no ancestor, prompt remove a
373 377 --------------
374 378 resolving manifests
375 379 overwrite None branchmerge True partial False
376 380 ancestor 0b76e65c8289 local e03727d2d66b remote 924404dff337
377 381 b: versions differ -> m
378 382 rev: versions differ -> m
379 383 merging b
380 384 resolving b
381 385 my b@0b76e65c8289 other b@e03727d2d66b ancestor b@000000000000
382 386 merging rev
383 387 resolving rev
384 388 my rev@0b76e65c8289 other rev@e03727d2d66b ancestor rev@924404dff337
385 389 0 files updated, 2 files merged, 0 files removed, 0 files unresolved
386 390 (branch merge, don't forget to commit)
387 391 --------------
388 392 M b
389 393 C a
390 394 --------------
391 395
392 396 --------------
393 397 test L:up a R:um a b W: - 20 merge a and b to b, remove a
394 398 --------------
395 399 resolving manifests
396 400 overwrite None branchmerge True partial False
397 401 ancestor e300d1c794ec local 79cc6877a3b7 remote 924404dff337
398 402 a: remote moved -> c
399 403 rev: versions differ -> m
400 404 merging a and b to b
401 405 resolving a
402 406 my a@e300d1c794ec other b@79cc6877a3b7 ancestor a@924404dff337
403 407 copying a to b
404 408 removing a
405 409 merging rev
406 410 resolving rev
407 411 my rev@e300d1c794ec other rev@79cc6877a3b7 ancestor rev@924404dff337
408 412 0 files updated, 2 files merged, 0 files removed, 0 files unresolved
409 413 (branch merge, don't forget to commit)
410 414 --------------
411 415 M b
412 416 a
413 417 --------------
414 418
415 419 --------------
416 420 test L:um a b R:up a W: - 21 merge a and b to b
417 421 --------------
418 422 resolving manifests
419 423 overwrite None branchmerge True partial False
420 424 ancestor ec03c2ca8642 local f4db7e329e71 remote 924404dff337
421 425 b: local moved -> c
422 426 rev: versions differ -> m
423 427 merging b and a to b
424 428 resolving b
425 429 my b@ec03c2ca8642 other a@f4db7e329e71 ancestor a@924404dff337
426 430 merging rev
427 431 resolving rev
428 432 my rev@ec03c2ca8642 other rev@f4db7e329e71 ancestor rev@924404dff337
429 433 0 files updated, 2 files merged, 0 files removed, 0 files unresolved
430 434 (branch merge, don't forget to commit)
431 435 --------------
432 436 M b
437 a
433 438 --------------
434 439
435 440 --------------
436 441 test L:nm a b R:up a c W: - 23 get c, keep b
437 442 --------------
438 443 resolving manifests
439 444 overwrite None branchmerge True partial False
440 445 ancestor ecf3cb2a4219 local 2b958612230f remote 924404dff337
441 446 b: local moved -> c
442 447 rev: versions differ -> m
443 448 c: remote created -> g
444 449 merging b and a to b
445 450 resolving b
446 451 my b@ecf3cb2a4219 other a@2b958612230f ancestor a@924404dff337
447 452 getting c
448 453 merging rev
449 454 resolving rev
450 455 my rev@ecf3cb2a4219 other rev@2b958612230f ancestor rev@924404dff337
451 456 1 files updated, 2 files merged, 0 files removed, 0 files unresolved
452 457 (branch merge, don't forget to commit)
453 458 --------------
454 459 M b
460 a
455 461 M c
456 462 --------------
457 463