##// END OF EJS Templates
Clarified message when there is nothing to merge.
Thomas Arendsen Hein -
r2548:0229ff95 default
parent child Browse files
Show More
@@ -1,2152 +1,2152 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 demandload(globals(), "appendfile changegroup")
12 12 demandload(globals(), "changelog dirstate filelog manifest repo")
13 13 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 14 demandload(globals(), "os revlog util")
15 15
16 16 class localrepository(object):
17 17 capabilities = ()
18 18
19 19 def __del__(self):
20 20 self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create, initialize) the repository at path.

        If path is None, walk upward from the current directory until a
        ".hg" directory is found.  parentui supplies configuration that
        the repository's own hgrc is layered on top of.
        Raises repo.RepoError if no repository can be located.
        """
        if not path:
            # search upward from the cwd for a directory containing .hg
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    # reached the filesystem root without finding a repo
                    raise repo.RepoError(_("no repo found"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise repo.RepoError(_("repository %s not found") % path)

        self.root = os.path.abspath(path)
        self.origroot = path
        self.ui = ui.ui(parentui=parentui)
        self.opener = util.opener(self.path)    # files under .hg
        self.wopener = util.opener(self.root)   # working-directory files

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            # a repository without an hgrc is perfectly valid
            pass

        # determine revlog version and flags from the [revlog] config
        v = self.ui.revlogopts
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.opener, v)
        self.changelog = changelog.changelog(self.opener, v)

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily populated caches
        self.tagscache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
81 81
    def hook(self, name, throw=False, **args):
        """Run every hook configured for name in the [hooks] section.

        Values prefixed with "python:" are called in-process; anything
        else runs as a shell command with args exported as HG_* environment
        variables.  If throw is true, a failing hook raises util.Abort;
        otherwise only a warning is printed.  Returns true if any hook
        failed.
        """
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                raise util.Abort(_('%s hook is invalid '
                                   '(import of "%s" failed)') %
                                 (hname, modname))
            try:
                # walk the dotted path down to the callable
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0] % exc.args[1:]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                # show the traceback but treat the hook as failed
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # run an external hook command with args as HG_* env vars
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        # "name" and "name.suffix" entries both match; run in sorted order
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
158 158
159 159 def tags(self):
160 160 '''return a mapping of tag to node'''
161 161 if not self.tagscache:
162 162 self.tagscache = {}
163 163
164 164 def parsetag(line, context):
165 165 if not line:
166 166 return
167 167 s = l.split(" ", 1)
168 168 if len(s) != 2:
169 169 self.ui.warn(_("%s: cannot parse entry\n") % context)
170 170 return
171 171 node, key = s
172 172 key = key.strip()
173 173 try:
174 174 bin_n = bin(node)
175 175 except TypeError:
176 176 self.ui.warn(_("%s: node '%s' is not well formed\n") %
177 177 (context, node))
178 178 return
179 179 if bin_n not in self.changelog.nodemap:
180 180 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
181 181 (context, key))
182 182 return
183 183 self.tagscache[key] = bin_n
184 184
185 185 # read the tags file from each head, ending with the tip,
186 186 # and add each tag found to the map, with "newer" ones
187 187 # taking precedence
188 188 heads = self.heads()
189 189 heads.reverse()
190 190 fl = self.file(".hgtags")
191 191 for node in heads:
192 192 change = self.changelog.read(node)
193 193 rev = self.changelog.rev(node)
194 194 fn, ff = self.manifest.find(change[0], '.hgtags')
195 195 if fn is None: continue
196 196 count = 0
197 197 for l in fl.read(fn).splitlines():
198 198 count += 1
199 199 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
200 200 (rev, short(node), count))
201 201 try:
202 202 f = self.opener("localtags")
203 203 count = 0
204 204 for l in f:
205 205 count += 1
206 206 parsetag(l, _("localtags, line %d") % count)
207 207 except IOError:
208 208 pass
209 209
210 210 self.tagscache['tip'] = self.changelog.tip()
211 211
212 212 return self.tagscache
213 213
214 214 def tagslist(self):
215 215 '''return a list of tags ordered by revision'''
216 216 l = []
217 217 for t, n in self.tags().items():
218 218 try:
219 219 r = self.changelog.rev(n)
220 220 except:
221 221 r = -2 # sort to the beginning of the list if unknown
222 222 l.append((r, t, n))
223 223 l.sort()
224 224 return [(t, n) for r, t, n in l]
225 225
226 226 def nodetags(self, node):
227 227 '''return the tags associated with a node'''
228 228 if not self.nodetagscache:
229 229 self.nodetagscache = {}
230 230 for t, n in self.tags().items():
231 231 self.nodetagscache.setdefault(n, []).append(t)
232 232 return self.nodetagscache.get(node, [])
233 233
234 234 def lookup(self, key):
235 235 try:
236 236 return self.tags()[key]
237 237 except KeyError:
238 238 try:
239 239 return self.changelog.lookup(key)
240 240 except:
241 241 raise repo.RepoError(_("unknown revision '%s'") % key)
242 242
243 243 def dev(self):
244 244 return os.lstat(self.path).st_dev
245 245
246 246 def local(self):
247 247 return True
248 248
249 249 def join(self, f):
250 250 return os.path.join(self.path, f)
251 251
252 252 def wjoin(self, f):
253 253 return os.path.join(self.root, f)
254 254
255 255 def file(self, f):
256 256 if f[0] == '/':
257 257 f = f[1:]
258 258 return filelog.filelog(self.opener, f, self.revlogversion)
259 259
260 260 def getcwd(self):
261 261 return self.dirstate.getcwd()
262 262
263 263 def wfile(self, f, mode='r'):
264 264 return self.wopener(f, mode)
265 265
266 266 def wread(self, filename):
267 267 if self.encodepats == None:
268 268 l = []
269 269 for pat, cmd in self.ui.configitems("encode"):
270 270 mf = util.matcher(self.root, "", [pat], [], [])[1]
271 271 l.append((mf, cmd))
272 272 self.encodepats = l
273 273
274 274 data = self.wopener(filename, 'r').read()
275 275
276 276 for mf, cmd in self.encodepats:
277 277 if mf(filename):
278 278 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
279 279 data = util.filter(data, cmd)
280 280 break
281 281
282 282 return data
283 283
284 284 def wwrite(self, filename, data, fd=None):
285 285 if self.decodepats == None:
286 286 l = []
287 287 for pat, cmd in self.ui.configitems("decode"):
288 288 mf = util.matcher(self.root, "", [pat], [], [])[1]
289 289 l.append((mf, cmd))
290 290 self.decodepats = l
291 291
292 292 for mf, cmd in self.decodepats:
293 293 if mf(filename):
294 294 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
295 295 data = util.filter(data, cmd)
296 296 break
297 297
298 298 if fd:
299 299 return fd.write(data)
300 300 return self.wopener(filename, 'w').write(data)
301 301
302 302 def transaction(self):
303 303 tr = self.transhandle
304 304 if tr != None and tr.running():
305 305 return tr.nest()
306 306
307 307 # save dirstate for rollback
308 308 try:
309 309 ds = self.opener("dirstate").read()
310 310 except IOError:
311 311 ds = ""
312 312 self.opener("journal.dirstate", "w").write(ds)
313 313
314 314 tr = transaction.transaction(self.ui.warn, self.opener,
315 315 self.join("journal"),
316 316 aftertrans(self.path))
317 317 self.transhandle = tr
318 318 return tr
319 319
320 320 def recover(self):
321 321 l = self.lock()
322 322 if os.path.exists(self.join("journal")):
323 323 self.ui.status(_("rolling back interrupted transaction\n"))
324 324 transaction.rollback(self.opener, self.join("journal"))
325 325 self.reload()
326 326 return True
327 327 else:
328 328 self.ui.warn(_("no interrupted transaction available\n"))
329 329 return False
330 330
331 331 def rollback(self, wlock=None):
332 332 if not wlock:
333 333 wlock = self.wlock()
334 334 l = self.lock()
335 335 if os.path.exists(self.join("undo")):
336 336 self.ui.status(_("rolling back last transaction\n"))
337 337 transaction.rollback(self.opener, self.join("undo"))
338 338 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
339 339 self.reload()
340 340 self.wreload()
341 341 else:
342 342 self.ui.warn(_("no rollback information available\n"))
343 343
344 344 def wreload(self):
345 345 self.dirstate.read()
346 346
347 347 def reload(self):
348 348 self.changelog.load()
349 349 self.manifest.load()
350 350 self.tagscache = None
351 351 self.nodetagscache = None
352 352
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the named lock file and return the lock object.

        With wait false, a held lock raises lock.LockHeld immediately;
        otherwise we retry with a timeout taken from the ui.timeout
        config (600 seconds by default).  releasefn is invoked when the
        lock is released, acquirefn right after acquisition; desc is
        used in user-visible messages.
        """
        try:
            # first try a non-blocking acquire
            l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %s\n") %
                         (desc, inst.args[0]))
            # default to 600 seconds timeout
            l = lock.lock(self.join(lockname),
                          int(self.ui.config("ui", "timeout") or 600),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
369 369
370 370 def lock(self, wait=1):
371 371 return self.do_lock("lock", wait, acquirefn=self.reload,
372 372 desc=_('repository %s') % self.origroot)
373 373
374 374 def wlock(self, wait=1):
375 375 return self.do_lock("wlock", wait, self.dirstate.write,
376 376 self.wreload,
377 377 desc=_('working directory of %s') % self.origroot)
378 378
379 379 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
380 380 "determine whether a new filenode is needed"
381 381 fp1 = manifest1.get(filename, nullid)
382 382 fp2 = manifest2.get(filename, nullid)
383 383
384 384 if fp2 != nullid:
385 385 # is one parent an ancestor of the other?
386 386 fpa = filelog.ancestor(fp1, fp2)
387 387 if fpa == fp1:
388 388 fp1, fp2 = fp2, nullid
389 389 elif fpa == fp2:
390 390 fp2 = nullid
391 391
392 392 # is the file unmodified from the parent? report existing entry
393 393 if fp2 == nullid and text == filelog.read(fp1):
394 394 return (fp1, None, None)
395 395
396 396 return (None, fp1, fp2)
397 397
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        """Commit the given files directly, bypassing the usual status
        machinery (used by import/debug commands).

        p1/p2 default to the dirstate parents.  The dirstate is only
        updated when p1 matches the working directory's current first
        parent; otherwise the working directory is left untouched.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        # only touch the dirstate when committing on top of the working
        # directory's current first parent
        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # unchanged from a parent: reuse the existing entry
                    mm[f] = entry
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # file missing from the working dir: treat as removed
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
453 453
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False):
        """Commit working directory changes and return the new node.

        files restricts the commit to the named files; otherwise
        everything reported by self.changes() is committed.  When text
        is empty (or force_editor is set) the user's editor is invoked.
        Returns None when there is nothing to commit or the final commit
        message is empty.
        """
        commit = []
        remove = []
        changed = []

        # decide what to commit and what to remove, either from the
        # explicit file list or from working-directory status
        if files:
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            modified, added, removed, deleted, unknown = self.changes(match=match)
            commit = modified + added
            remove = removed

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        # empty commits are allowed when forced or when finishing a merge
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status(_("nothing changed\n"))
            return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wread(f)
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

            r = self.file(f)

            # record dirstate copy information in the filelog metadata
            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
                fp1, fp2 = nullid, nullid
            else:
                entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # unchanged from a parent: reuse the existing entry
                    new[f] = entry
                    continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1 = m1.copy()
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            # build a template message and run the user's editor
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # a message that is empty after stripping aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
580 580
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        """Yield (source, filename) pairs for files of interest.

        With a node, walk that revision's manifest (source 'm'); without
        one, delegate to the dirstate walk.  Files explicitly listed in
        files but absent from the revision are yielded with source 'b'
        when badmatch accepts them, otherwise warned about.
        """
        if node:
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                fdict.pop(fn, None)
                if match(fn):
                    yield 'm', fn
            # anything left in fdict was requested but not in the manifest
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
598 598
    def changes(self, node1=None, node2=None, files=[], match=util.always,
                wlock=None, show_ignored=None):
        """return changes between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns (modified, added, removed, deleted, unknown) lists, each
        sorted, plus an ignored list when show_ignored is set.
        """

        def fcmp(fn, mf):
            # compare the working copy of fn against its manifest entry
            t1 = self.wread(fn)
            t2 = self.file(fn).read(mf.get(fn, nullid))
            return cmp(t1, t2)

        def mfmatches(node):
            # manifest of node restricted to files accepted by match
            change = self.changelog.read(node)
            mf = dict(self.manifest.read(change[0]))
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown, ignored = [],[],[],[],[],[]
        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    # can't lock: proceed, but skip dirstate updates below
                    wlock = None
            lookup, modified, added, removed, deleted, unknown, ignored = (
                self.dirstate.changes(files, match, show_ignored))

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        elif wlock is not None:
                            # file is clean: record that in the dirstate
                            # while we hold the write lock
                            self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            deleted, unknown, ignored = [], [], []
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added = [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
                        modified.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever survives in mf1 exists only in node1: removed
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored:
            l.sort()
        if show_ignored is None:
            return (modified, added, removed, deleted, unknown)
        else:
            return (modified, added, removed, deleted, unknown, ignored)
690 690
691 691 def add(self, list, wlock=None):
692 692 if not wlock:
693 693 wlock = self.wlock()
694 694 for f in list:
695 695 p = self.wjoin(f)
696 696 if not os.path.exists(p):
697 697 self.ui.warn(_("%s does not exist!\n") % f)
698 698 elif not os.path.isfile(p):
699 699 self.ui.warn(_("%s not added: only files supported currently\n")
700 700 % f)
701 701 elif self.dirstate.state(f) in 'an':
702 702 self.ui.warn(_("%s already tracked!\n") % f)
703 703 else:
704 704 self.dirstate.update([f], "a")
705 705
706 706 def forget(self, list, wlock=None):
707 707 if not wlock:
708 708 wlock = self.wlock()
709 709 for f in list:
710 710 if self.dirstate.state(f) not in 'ai':
711 711 self.ui.warn(_("%s not added!\n") % f)
712 712 else:
713 713 self.dirstate.forget([f])
714 714
715 715 def remove(self, list, unlink=False, wlock=None):
716 716 if unlink:
717 717 for f in list:
718 718 try:
719 719 util.unlink(self.wjoin(f))
720 720 except OSError, inst:
721 721 if inst.errno != errno.ENOENT:
722 722 raise
723 723 if not wlock:
724 724 wlock = self.wlock()
725 725 for f in list:
726 726 p = self.wjoin(f)
727 727 if os.path.exists(p):
728 728 self.ui.warn(_("%s still exists!\n") % f)
729 729 elif self.dirstate.state(f) == 'a':
730 730 self.dirstate.forget([f])
731 731 elif f not in self.dirstate:
732 732 self.ui.warn(_("%s not tracked!\n") % f)
733 733 else:
734 734 self.dirstate.update([f], "r")
735 735
736 736 def undelete(self, list, wlock=None):
737 737 p = self.dirstate.parents()[0]
738 738 mn = self.changelog.read(p)[0]
739 739 mf = self.manifest.readflags(mn)
740 740 m = self.manifest.read(mn)
741 741 if not wlock:
742 742 wlock = self.wlock()
743 743 for f in list:
744 744 if self.dirstate.state(f) not in "r":
745 745 self.ui.warn("%s not removed!\n" % f)
746 746 else:
747 747 t = self.file(f).read(m[f])
748 748 self.wwrite(f, t)
749 749 util.set_exec(self.wjoin(f), mf[f])
750 750 self.dirstate.update([f], "n")
751 751
752 752 def copy(self, source, dest, wlock=None):
753 753 p = self.wjoin(dest)
754 754 if not os.path.exists(p):
755 755 self.ui.warn(_("%s does not exist!\n") % dest)
756 756 elif not os.path.isfile(p):
757 757 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
758 758 else:
759 759 if not wlock:
760 760 wlock = self.wlock()
761 761 if self.dirstate.state(dest) == '?':
762 762 self.dirstate.update([dest], "a")
763 763 self.dirstate.copy(source, dest)
764 764
765 765 def heads(self, start=None):
766 766 heads = self.changelog.heads(start)
767 767 # sort the output in rev descending order
768 768 heads = [(-self.changelog.rev(h), h) for h in heads]
769 769 heads.sort()
770 770 return [n for (r, n) in heads]
771 771
772 772 # branchlookup returns a dict giving a list of branches for
773 773 # each head. A branch is defined as the tag of a node or
774 774 # the branch of the node's parents. If a node has multiple
775 775 # branch tags, tags are eliminated if they are visible from other
776 776 # branch tags.
777 777 #
778 778 # So, for this graph: a->b->c->d->e
779 779 # \ /
780 780 # aa -----/
781 781 # a has tag 2.6.12
782 782 # d has tag 2.6.13
783 783 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
784 784 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
785 785 # from the list.
786 786 #
787 787 # It is possible that more than one head will have the same branch tag.
788 788 # callers need to check the result for multiple heads under the same
789 789 # branch tag if that is a problem for them (ie checkout of a specific
790 790 # branch).
791 791 #
792 792 # passing in a specific branch will limit the depth of the search
793 793 # through the parents. It won't limit the branches returned in the
794 794 # result though.
    def branchlookup(self, heads=None, branch=None):
        """Return a dict mapping each head to its list of branch tags.

        See the comment block above for the full definition; briefly, a
        head's branch tags are the tags visible from it, minus any tag
        visible from one of the head's other tags.  branch, if given,
        limits how deep the parent traversal goes.
        """
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume a deferred traversal of a merge's second parent
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        # this tag is visible from everything found so far
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        # reached the requested branch: stop descending
                        continue
                seen[n] = 1
                # defer the second parent of a merge for a later pass
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # set of tag nodes reachable from node, memoized in
                # viscache across heads
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
877 877
878 878 def branches(self, nodes):
879 879 if not nodes:
880 880 nodes = [self.changelog.tip()]
881 881 b = []
882 882 for n in nodes:
883 883 t = n
884 884 while 1:
885 885 p = self.changelog.parents(n)
886 886 if p[1] != nullid or p[0] == nullid:
887 887 b.append((t, n, p[0], p[1]))
888 888 break
889 889 n = p[0]
890 890 return b
891 891
892 892 def between(self, pairs):
893 893 r = []
894 894
895 895 for top, bottom in pairs:
896 896 n, l, i = top, [], 0
897 897 f = 1
898 898
899 899 while n != bottom:
900 900 p = self.changelog.parents(n)[0]
901 901 if i == f:
902 902 l.append(n)
903 903 f = f * 2
904 904 n = p
905 905 i += 1
906 906
907 907 r.append(l)
908 908
909 909 return r
910 910
911 911 def findincoming(self, remote, base=None, heads=None, force=False):
912 912 """Return list of roots of the subsets of missing nodes from remote
913 913
914 914 If base dict is specified, assume that these nodes and their parents
915 915 exist on the remote side and that no child of a node of base exists
916 916 in both remote and self.
917 917 Furthermore base will be updated to include the nodes that exists
918 918 in self and remote but no children exists in self and remote.
919 919 If a list of heads is specified, return only nodes which are heads
920 920 or ancestors of these heads.
921 921
922 922 All the ancestors of base are in self and in remote.
923 923 All the descendants of the list returned are missing in self.
924 924 (and so we know that the rest of the nodes are missing in remote, see
925 925 outgoing)
926 926 """
927 927 m = self.changelog.nodemap
928 928 search = []
929 929 fetch = {}
930 930 seen = {}
931 931 seenbranch = {}
932 932 if base == None:
933 933 base = {}
934 934
935 935 if not heads:
936 936 heads = remote.heads()
937 937
938 938 if self.changelog.tip() == nullid:
939 939 base[nullid] = 1
940 940 if heads != [nullid]:
941 941 return [nullid]
942 942 return []
943 943
944 944 # assume we're closer to the tip than the root
945 945 # and start by examining the heads
946 946 self.ui.status(_("searching for changes\n"))
947 947
948 948 unknown = []
949 949 for h in heads:
950 950 if h not in m:
951 951 unknown.append(h)
952 952 else:
953 953 base[h] = 1
954 954
955 955 if not unknown:
956 956 return []
957 957
958 958 req = dict.fromkeys(unknown)
959 959 reqcnt = 0
960 960
961 961 # search through remote branches
962 962 # a 'branch' here is a linear segment of history, with four parts:
963 963 # head, root, first parent, second parent
964 964 # (a branch always has two parents (or none) by definition)
965 965 unknown = remote.branches(unknown)
966 966 while unknown:
967 967 r = []
968 968 while unknown:
969 969 n = unknown.pop(0)
970 970 if n[0] in seen:
971 971 continue
972 972
973 973 self.ui.debug(_("examining %s:%s\n")
974 974 % (short(n[0]), short(n[1])))
975 975 if n[0] == nullid: # found the end of the branch
976 976 pass
977 977 elif n in seenbranch:
978 978 self.ui.debug(_("branch already found\n"))
979 979 continue
980 980 elif n[1] and n[1] in m: # do we know the base?
981 981 self.ui.debug(_("found incomplete branch %s:%s\n")
982 982 % (short(n[0]), short(n[1])))
983 983 search.append(n) # schedule branch range for scanning
984 984 seenbranch[n] = 1
985 985 else:
986 986 if n[1] not in seen and n[1] not in fetch:
987 987 if n[2] in m and n[3] in m:
988 988 self.ui.debug(_("found new changeset %s\n") %
989 989 short(n[1]))
990 990 fetch[n[1]] = 1 # earliest unknown
991 991 for p in n[2:4]:
992 992 if p in m:
993 993 base[p] = 1 # latest known
994 994
995 995 for p in n[2:4]:
996 996 if p not in req and p not in m:
997 997 r.append(p)
998 998 req[p] = 1
999 999 seen[n[0]] = 1
1000 1000
1001 1001 if r:
1002 1002 reqcnt += 1
1003 1003 self.ui.debug(_("request %d: %s\n") %
1004 1004 (reqcnt, " ".join(map(short, r))))
1005 1005 for p in range(0, len(r), 10):
1006 1006 for b in remote.branches(r[p:p+10]):
1007 1007 self.ui.debug(_("received %s:%s\n") %
1008 1008 (short(b[0]), short(b[1])))
1009 1009 unknown.append(b)
1010 1010
1011 1011 # do binary search on the branches we found
1012 1012 while search:
1013 1013 n = search.pop(0)
1014 1014 reqcnt += 1
1015 1015 l = remote.between([(n[0], n[1])])[0]
1016 1016 l.append(n[1])
1017 1017 p = n[0]
1018 1018 f = 1
1019 1019 for i in l:
1020 1020 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1021 1021 if i in m:
1022 1022 if f <= 2:
1023 1023 self.ui.debug(_("found new branch changeset %s\n") %
1024 1024 short(p))
1025 1025 fetch[p] = 1
1026 1026 base[i] = 1
1027 1027 else:
1028 1028 self.ui.debug(_("narrowed branch search to %s:%s\n")
1029 1029 % (short(p), short(i)))
1030 1030 search.append((p, i))
1031 1031 break
1032 1032 p, f = i, f * 2
1033 1033
1034 1034 # sanity check our fetch list
1035 1035 for f in fetch.keys():
1036 1036 if f in m:
1037 1037 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1038 1038
1039 1039 if base.keys() == [nullid]:
1040 1040 if force:
1041 1041 self.ui.warn(_("warning: repository is unrelated\n"))
1042 1042 else:
1043 1043 raise util.Abort(_("repository is unrelated"))
1044 1044
1045 1045 self.ui.note(_("found new changesets starting at ") +
1046 1046 " ".join([short(f) for f in fetch]) + "\n")
1047 1047
1048 1048 self.ui.debug(_("%d total queries\n") % reqcnt)
1049 1049
1050 1050 return fetch.keys()
1051 1051
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        # without a caller-supplied base, compute the common nodes by
        # running the incoming search (findincoming fills 'base' in place)
        if base == None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        # start from every node we have locally, then prune what remote has
        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                # remote has a node, so it has all of its ancestors too:
                # queue the parents for pruning as well
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            # a node neither of whose parents survived pruning is a root
            # of a subset missing from remote
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset
1099 1099
1100 1100 def pull(self, remote, heads=None, force=False):
1101 1101 l = self.lock()
1102 1102
1103 1103 fetch = self.findincoming(remote, force=force)
1104 1104 if fetch == [nullid]:
1105 1105 self.ui.status(_("requesting all changes\n"))
1106 1106
1107 1107 if not fetch:
1108 1108 self.ui.status(_("no changes found\n"))
1109 1109 return 0
1110 1110
1111 1111 if heads is None:
1112 1112 cg = remote.changegroup(fetch, 'pull')
1113 1113 else:
1114 1114 cg = remote.changegroupsubset(fetch, heads, 'pull')
1115 1115 return self.addchangegroup(cg, 'pull')
1116 1116
1117 1117 def push(self, remote, force=False, revs=None):
1118 1118 # there are two ways to push to remote repo:
1119 1119 #
1120 1120 # addchangegroup assumes local user can lock remote
1121 1121 # repo (local filesystem, old ssh servers).
1122 1122 #
1123 1123 # unbundle assumes local user cannot lock remote repo (new ssh
1124 1124 # servers, http servers).
1125 1125
1126 1126 if 'unbundle' in remote.capabilities:
1127 1127 return self.push_unbundle(remote, force, revs)
1128 1128 return self.push_addchangegroup(remote, force, revs)
1129 1129
    def prepush(self, remote, force, revs):
        """Analyse a push and build the changegroup to transfer.

        Returns (changegroup, remote_heads) when the push may proceed,
        or (None, 1) when it must not (unsynced remote changes, nothing
        to push, or push would create new remote branches without force).
        """
        base = {}
        remote_heads = remote.heads()
        # findincoming fills 'base' with the common nodes as a side effect;
        # its return value tells us whether remote has changesets we lack
        inc = self.findincoming(remote, base, remote_heads, force=force)
        if not force and inc:
            self.ui.warn(_("abort: unsynced remote changes!\n"))
            self.ui.status(_("(did you forget to sync?"
                             " use push -f to force)\n"))
            return None, 1

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            # restrict the outgoing set to ancestors of the requested revs
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # FIXME we don't properly detect creation of new heads
            # in the push -r case, assume the user knows what he's doing
            if not revs and len(remote_heads) < len(heads) \
               and remote_heads != [nullid]:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1164 1164
1165 1165 def push_addchangegroup(self, remote, force, revs):
1166 1166 lock = remote.lock()
1167 1167
1168 1168 ret = self.prepush(remote, force, revs)
1169 1169 if ret[0] is not None:
1170 1170 cg, remote_heads = ret
1171 1171 return remote.addchangegroup(cg, 'push')
1172 1172 return ret[1]
1173 1173
1174 1174 def push_unbundle(self, remote, force, revs):
1175 1175 # local repo finds heads on server, finds out what revs it
1176 1176 # must push. once revs transferred, if server finds it has
1177 1177 # different heads (someone else won commit/push race), server
1178 1178 # aborts.
1179 1179
1180 1180 ret = self.prepush(remote, force, revs)
1181 1181 if ret[0] is not None:
1182 1182 cg, remote_heads = ret
1183 1183 if force: remote_heads = ['force']
1184 1184 return remote.unbundle(cg, remote_heads, 'push')
1185 1185 return ret[1]
1186 1186
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        Parameters:
          bases  - changenodes assumed to be present on the recipient
          heads  - changenodes up to which the group should reach
          source - tag passed to the preoutgoing/outgoing hooks

        Returns a util.chunkbuffer that streams the encoded changegroup."""

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            # everything the recipient provably has is removed from the
            # missing set (mutated in place)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            # single-element list so the closure can mutate it (no
            # 'nonlocal' in this Python version)
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming the a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we now the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1457 1457
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        basenodes are the changenodes assumed present on the recipient;
        source is the tag passed to the preoutgoing/outgoing hooks.
        Returns a util.chunkbuffer that streams the encoded changegroup."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # every descendant of the bases goes into the group
        nodes = cl.nodesbetween(basenodes, None)[0]
        # ersatz set of the changelog revision numbers being sent
        revset = dict.fromkeys([cl.rev(n) for n in nodes])

        # changenodes look up their own owning changenode
        def identity(x):
            return x

        # yield the nodes of a revlog whose linked changeset is in revset,
        # in revision (storage) order
        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        # closure factory: records every file touched by an outgoing
        # changeset into the shared changedfileset dict
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # closure factory: maps a node of the given revlog back to the
        # changenode it belongs to (via its linkrev)
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        # stream the group: changesets, then manifests, then one chunk
        # sequence per changed file, closed by a terminating chunk
        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1523 1523
    def addchangegroup(self, source, srctype):
        """add changegroup to repo.
        returns number of heads modified or added + 1.

        source is a stream of changegroup chunks (consumed through
        changegroup.chunkiter/getchunk); srctype is the tag reported to
        the prechangegroup/pretxnchangegroup/changegroup/incoming hooks.
        """

        # changesets arriving via the group have their linkrev set to the
        # next changelog index
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        # manifest/file revisions link back to an already-added changeset
        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype)

        changesets = files = revisions = 0

        # NOTE(review): tr is not explicitly rolled back if an exception
        # escapes below; presumably recovery relies on the journal — verify.
        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.opener, self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            # cor/cnr: changelog revision count before/after the group
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # an empty chunk marks the end of the per-file groups
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # flush the buffered changelog data to the real files
            cl.writedata()
        finally:
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.opener, self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # give hooks a chance to veto before the transaction commits
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype)

            # one 'incoming' hook invocation per added changeset
            for i in range(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype)

        return newheads - oldheads + 1
1618 1618
1619 1619 def update(self, node, allow=False, force=False, choose=None,
1620 1620 moddirstate=True, forcemerge=False, wlock=None, show_stats=True):
1621 1621 pl = self.dirstate.parents()
1622 1622 if not force and pl[1] != nullid:
1623 1623 raise util.Abort(_("outstanding uncommitted merges"))
1624 1624
1625 1625 err = False
1626 1626
1627 1627 p1, p2 = pl[0], node
1628 1628 pa = self.changelog.ancestor(p1, p2)
1629 1629 m1n = self.changelog.read(p1)[0]
1630 1630 m2n = self.changelog.read(p2)[0]
1631 1631 man = self.manifest.ancestor(m1n, m2n)
1632 1632 m1 = self.manifest.read(m1n)
1633 1633 mf1 = self.manifest.readflags(m1n)
1634 1634 m2 = self.manifest.read(m2n).copy()
1635 1635 mf2 = self.manifest.readflags(m2n)
1636 1636 ma = self.manifest.read(man)
1637 1637 mfa = self.manifest.readflags(man)
1638 1638
1639 1639 modified, added, removed, deleted, unknown = self.changes()
1640 1640
1641 1641 # is this a jump, or a merge? i.e. is there a linear path
1642 1642 # from p1 to p2?
1643 1643 linear_path = (pa == p1 or pa == p2)
1644 1644
1645 1645 if allow and linear_path:
1646 raise util.Abort(_("there is nothing to merge, "
1647 "just use 'hg update'"))
1646 raise util.Abort(_("there is nothing to merge, just use "
1647 "'hg update' or look at 'hg heads'"))
1648 1648 if allow and not forcemerge:
1649 1649 if modified or added or removed:
1650 1650 raise util.Abort(_("outstanding uncommitted changes"))
1651 1651
1652 1652 if not forcemerge and not force:
1653 1653 for f in unknown:
1654 1654 if f in m2:
1655 1655 t1 = self.wread(f)
1656 1656 t2 = self.file(f).read(m2[f])
1657 1657 if cmp(t1, t2) != 0:
1658 1658 raise util.Abort(_("'%s' already exists in the working"
1659 1659 " dir and differs from remote") % f)
1660 1660
1661 1661 # resolve the manifest to determine which files
1662 1662 # we care about merging
1663 1663 self.ui.note(_("resolving manifests\n"))
1664 1664 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1665 1665 (force, allow, moddirstate, linear_path))
1666 1666 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1667 1667 (short(man), short(m1n), short(m2n)))
1668 1668
1669 1669 merge = {}
1670 1670 get = {}
1671 1671 remove = []
1672 1672
1673 1673 # construct a working dir manifest
1674 1674 mw = m1.copy()
1675 1675 mfw = mf1.copy()
1676 1676 umap = dict.fromkeys(unknown)
1677 1677
1678 1678 for f in added + modified + unknown:
1679 1679 mw[f] = ""
1680 1680 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1681 1681
1682 1682 if moddirstate and not wlock:
1683 1683 wlock = self.wlock()
1684 1684
1685 1685 for f in deleted + removed:
1686 1686 if f in mw:
1687 1687 del mw[f]
1688 1688
1689 1689 # If we're jumping between revisions (as opposed to merging),
1690 1690 # and if neither the working directory nor the target rev has
1691 1691 # the file, then we need to remove it from the dirstate, to
1692 1692 # prevent the dirstate from listing the file when it is no
1693 1693 # longer in the manifest.
1694 1694 if moddirstate and linear_path and f not in m2:
1695 1695 self.dirstate.forget((f,))
1696 1696
1697 1697 # Compare manifests
1698 1698 for f, n in mw.iteritems():
1699 1699 if choose and not choose(f):
1700 1700 continue
1701 1701 if f in m2:
1702 1702 s = 0
1703 1703
1704 1704 # is the wfile new since m1, and match m2?
1705 1705 if f not in m1:
1706 1706 t1 = self.wread(f)
1707 1707 t2 = self.file(f).read(m2[f])
1708 1708 if cmp(t1, t2) == 0:
1709 1709 n = m2[f]
1710 1710 del t1, t2
1711 1711
1712 1712 # are files different?
1713 1713 if n != m2[f]:
1714 1714 a = ma.get(f, nullid)
1715 1715 # are both different from the ancestor?
1716 1716 if n != a and m2[f] != a:
1717 1717 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1718 1718 # merge executable bits
1719 1719 # "if we changed or they changed, change in merge"
1720 1720 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1721 1721 mode = ((a^b) | (a^c)) ^ a
1722 1722 merge[f] = (m1.get(f, nullid), m2[f], mode)
1723 1723 s = 1
1724 1724 # are we clobbering?
1725 1725 # is remote's version newer?
1726 1726 # or are we going back in time?
1727 1727 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1728 1728 self.ui.debug(_(" remote %s is newer, get\n") % f)
1729 1729 get[f] = m2[f]
1730 1730 s = 1
1731 1731 elif f in umap or f in added:
1732 1732 # this unknown file is the same as the checkout
1733 1733 # we need to reset the dirstate if the file was added
1734 1734 get[f] = m2[f]
1735 1735
1736 1736 if not s and mfw[f] != mf2[f]:
1737 1737 if force:
1738 1738 self.ui.debug(_(" updating permissions for %s\n") % f)
1739 1739 util.set_exec(self.wjoin(f), mf2[f])
1740 1740 else:
1741 1741 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1742 1742 mode = ((a^b) | (a^c)) ^ a
1743 1743 if mode != b:
1744 1744 self.ui.debug(_(" updating permissions for %s\n")
1745 1745 % f)
1746 1746 util.set_exec(self.wjoin(f), mode)
1747 1747 del m2[f]
1748 1748 elif f in ma:
1749 1749 if n != ma[f]:
1750 1750 r = _("d")
1751 1751 if not force and (linear_path or allow):
1752 1752 r = self.ui.prompt(
1753 1753 (_(" local changed %s which remote deleted\n") % f) +
1754 1754 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1755 1755 if r == _("d"):
1756 1756 remove.append(f)
1757 1757 else:
1758 1758 self.ui.debug(_("other deleted %s\n") % f)
1759 1759 remove.append(f) # other deleted it
1760 1760 else:
1761 1761 # file is created on branch or in working directory
1762 1762 if force and f not in umap:
1763 1763 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1764 1764 remove.append(f)
1765 1765 elif n == m1.get(f, nullid): # same as parent
1766 1766 if p2 == pa: # going backwards?
1767 1767 self.ui.debug(_("remote deleted %s\n") % f)
1768 1768 remove.append(f)
1769 1769 else:
1770 1770 self.ui.debug(_("local modified %s, keeping\n") % f)
1771 1771 else:
1772 1772 self.ui.debug(_("working dir created %s, keeping\n") % f)
1773 1773
1774 1774 for f, n in m2.iteritems():
1775 1775 if choose and not choose(f):
1776 1776 continue
1777 1777 if f[0] == "/":
1778 1778 continue
1779 1779 if f in ma and n != ma[f]:
1780 1780 r = _("k")
1781 1781 if not force and (linear_path or allow):
1782 1782 r = self.ui.prompt(
1783 1783 (_("remote changed %s which local deleted\n") % f) +
1784 1784 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1785 1785 if r == _("k"):
1786 1786 get[f] = n
1787 1787 elif f not in ma:
1788 1788 self.ui.debug(_("remote created %s\n") % f)
1789 1789 get[f] = n
1790 1790 else:
1791 1791 if force or p2 == pa: # going backwards?
1792 1792 self.ui.debug(_("local deleted %s, recreating\n") % f)
1793 1793 get[f] = n
1794 1794 else:
1795 1795 self.ui.debug(_("local deleted %s\n") % f)
1796 1796
1797 1797 del mw, m1, m2, ma
1798 1798
1799 1799 if force:
1800 1800 for f in merge:
1801 1801 get[f] = merge[f][1]
1802 1802 merge = {}
1803 1803
1804 1804 if linear_path or force:
1805 1805 # we don't need to do any magic, just jump to the new rev
1806 1806 branch_merge = False
1807 1807 p1, p2 = p2, nullid
1808 1808 else:
1809 1809 if not allow:
1810 1810 self.ui.status(_("this update spans a branch"
1811 1811 " affecting the following files:\n"))
1812 1812 fl = merge.keys() + get.keys()
1813 1813 fl.sort()
1814 1814 for f in fl:
1815 1815 cf = ""
1816 1816 if f in merge:
1817 1817 cf = _(" (resolve)")
1818 1818 self.ui.status(" %s%s\n" % (f, cf))
1819 1819 self.ui.warn(_("aborting update spanning branches!\n"))
1820 1820 self.ui.status(_("(use 'hg merge' to merge across branches"
1821 1821 " or 'hg update -C' to lose changes)\n"))
1822 1822 return 1
1823 1823 branch_merge = True
1824 1824
1825 1825 xp1 = hex(p1)
1826 1826 xp2 = hex(p2)
1827 1827 if p2 == nullid: xxp2 = ''
1828 1828 else: xxp2 = xp2
1829 1829
1830 1830 self.hook('preupdate', throw=True, parent1=xp1, parent2=xxp2)
1831 1831
1832 1832 # get the files we don't need to change
1833 1833 files = get.keys()
1834 1834 files.sort()
1835 1835 for f in files:
1836 1836 if f[0] == "/":
1837 1837 continue
1838 1838 self.ui.note(_("getting %s\n") % f)
1839 1839 t = self.file(f).read(get[f])
1840 1840 self.wwrite(f, t)
1841 1841 util.set_exec(self.wjoin(f), mf2[f])
1842 1842 if moddirstate:
1843 1843 if branch_merge:
1844 1844 self.dirstate.update([f], 'n', st_mtime=-1)
1845 1845 else:
1846 1846 self.dirstate.update([f], 'n')
1847 1847
1848 1848 # merge the tricky bits
1849 1849 failedmerge = []
1850 1850 files = merge.keys()
1851 1851 files.sort()
1852 1852 for f in files:
1853 1853 self.ui.status(_("merging %s\n") % f)
1854 1854 my, other, flag = merge[f]
1855 1855 ret = self.merge3(f, my, other, xp1, xp2)
1856 1856 if ret:
1857 1857 err = True
1858 1858 failedmerge.append(f)
1859 1859 util.set_exec(self.wjoin(f), flag)
1860 1860 if moddirstate:
1861 1861 if branch_merge:
1862 1862 # We've done a branch merge, mark this file as merged
1863 1863 # so that we properly record the merger later
1864 1864 self.dirstate.update([f], 'm')
1865 1865 else:
1866 1866 # We've update-merged a locally modified file, so
1867 1867 # we set the dirstate to emulate a normal checkout
1868 1868 # of that file some time in the past. Thus our
1869 1869 # merge will appear as a normal local file
1870 1870 # modification.
1871 1871 f_len = len(self.file(f).read(other))
1872 1872 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1873 1873
1874 1874 remove.sort()
1875 1875 for f in remove:
1876 1876 self.ui.note(_("removing %s\n") % f)
1877 1877 util.audit_path(f)
1878 1878 try:
1879 1879 util.unlink(self.wjoin(f))
1880 1880 except OSError, inst:
1881 1881 if inst.errno != errno.ENOENT:
1882 1882 self.ui.warn(_("update failed to remove %s: %s!\n") %
1883 1883 (f, inst.strerror))
1884 1884 if moddirstate:
1885 1885 if branch_merge:
1886 1886 self.dirstate.update(remove, 'r')
1887 1887 else:
1888 1888 self.dirstate.forget(remove)
1889 1889
1890 1890 if moddirstate:
1891 1891 self.dirstate.setparents(p1, p2)
1892 1892
1893 1893 if show_stats:
1894 1894 stats = ((len(get), _("updated")),
1895 1895 (len(merge) - len(failedmerge), _("merged")),
1896 1896 (len(remove), _("removed")),
1897 1897 (len(failedmerge), _("unresolved")))
1898 1898 note = ", ".join([_("%d files %s") % s for s in stats])
1899 1899 self.ui.status("%s\n" % note)
1900 1900 if moddirstate:
1901 1901 if branch_merge:
1902 1902 if failedmerge:
1903 1903 self.ui.status(_("There are unresolved merges,"
1904 1904 " you can redo the full merge using:\n"
1905 1905 " hg update -C %s\n"
1906 1906 " hg merge %s\n"
1907 1907 % (self.changelog.rev(p1),
1908 1908 self.changelog.rev(p2))))
1909 1909 else:
1910 1910 self.ui.status(_("(branch merge, don't forget to commit)\n"))
1911 1911 elif failedmerge:
1912 1912 self.ui.status(_("There are unresolved merges with"
1913 1913 " locally modified files.\n"))
1914 1914
1915 1915 self.hook('update', parent1=xp1, parent2=xxp2, error=int(err))
1916 1916 return err
1917 1917
1918 1918 def merge3(self, fn, my, other, p1, p2):
1919 1919 """perform a 3-way merge in the working directory"""
1920 1920
1921 1921 def temp(prefix, node):
1922 1922 pre = "%s~%s." % (os.path.basename(fn), prefix)
1923 1923 (fd, name) = tempfile.mkstemp(prefix=pre)
1924 1924 f = os.fdopen(fd, "wb")
1925 1925 self.wwrite(fn, fl.read(node), f)
1926 1926 f.close()
1927 1927 return name
1928 1928
1929 1929 fl = self.file(fn)
1930 1930 base = fl.ancestor(my, other)
1931 1931 a = self.wjoin(fn)
1932 1932 b = temp("base", base)
1933 1933 c = temp("other", other)
1934 1934
1935 1935 self.ui.note(_("resolving %s\n") % fn)
1936 1936 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1937 1937 (fn, short(my), short(other), short(base)))
1938 1938
1939 1939 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1940 1940 or "hgmerge")
1941 1941 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1942 1942 environ={'HG_FILE': fn,
1943 1943 'HG_MY_NODE': p1,
1944 1944 'HG_OTHER_NODE': p2,
1945 1945 'HG_FILE_MY_NODE': hex(my),
1946 1946 'HG_FILE_OTHER_NODE': hex(other),
1947 1947 'HG_FILE_BASE_NODE': hex(base)})
1948 1948 if r:
1949 1949 self.ui.warn(_("merging %s failed!\n") % fn)
1950 1950
1951 1951 os.unlink(b)
1952 1952 os.unlink(c)
1953 1953 return r
1954 1954
1955 1955 def verify(self):
1956 1956 filelinkrevs = {}
1957 1957 filenodes = {}
1958 1958 changesets = revisions = files = 0
1959 1959 errors = [0]
1960 1960 warnings = [0]
1961 1961 neededmanifests = {}
1962 1962
1963 1963 def err(msg):
1964 1964 self.ui.warn(msg + "\n")
1965 1965 errors[0] += 1
1966 1966
1967 1967 def warn(msg):
1968 1968 self.ui.warn(msg + "\n")
1969 1969 warnings[0] += 1
1970 1970
1971 1971 def checksize(obj, name):
1972 1972 d = obj.checksize()
1973 1973 if d[0]:
1974 1974 err(_("%s data length off by %d bytes") % (name, d[0]))
1975 1975 if d[1]:
1976 1976 err(_("%s index contains %d extra bytes") % (name, d[1]))
1977 1977
1978 1978 def checkversion(obj, name):
1979 1979 if obj.version != revlog.REVLOGV0:
1980 1980 if not revlogv1:
1981 1981 warn(_("warning: `%s' uses revlog format 1") % name)
1982 1982 elif revlogv1:
1983 1983 warn(_("warning: `%s' uses revlog format 0") % name)
1984 1984
1985 1985 revlogv1 = self.revlogversion != revlog.REVLOGV0
1986 1986 if self.ui.verbose or revlogv1 != self.revlogv1:
1987 1987 self.ui.status(_("repository uses revlog format %d\n") %
1988 1988 (revlogv1 and 1 or 0))
1989 1989
1990 1990 seen = {}
1991 1991 self.ui.status(_("checking changesets\n"))
1992 1992 checksize(self.changelog, "changelog")
1993 1993
1994 1994 for i in range(self.changelog.count()):
1995 1995 changesets += 1
1996 1996 n = self.changelog.node(i)
1997 1997 l = self.changelog.linkrev(n)
1998 1998 if l != i:
1999 1999 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
2000 2000 if n in seen:
2001 2001 err(_("duplicate changeset at revision %d") % i)
2002 2002 seen[n] = 1
2003 2003
2004 2004 for p in self.changelog.parents(n):
2005 2005 if p not in self.changelog.nodemap:
2006 2006 err(_("changeset %s has unknown parent %s") %
2007 2007 (short(n), short(p)))
2008 2008 try:
2009 2009 changes = self.changelog.read(n)
2010 2010 except KeyboardInterrupt:
2011 2011 self.ui.warn(_("interrupted"))
2012 2012 raise
2013 2013 except Exception, inst:
2014 2014 err(_("unpacking changeset %s: %s") % (short(n), inst))
2015 2015 continue
2016 2016
2017 2017 neededmanifests[changes[0]] = n
2018 2018
2019 2019 for f in changes[3]:
2020 2020 filelinkrevs.setdefault(f, []).append(i)
2021 2021
2022 2022 seen = {}
2023 2023 self.ui.status(_("checking manifests\n"))
2024 2024 checkversion(self.manifest, "manifest")
2025 2025 checksize(self.manifest, "manifest")
2026 2026
2027 2027 for i in range(self.manifest.count()):
2028 2028 n = self.manifest.node(i)
2029 2029 l = self.manifest.linkrev(n)
2030 2030
2031 2031 if l < 0 or l >= self.changelog.count():
2032 2032 err(_("bad manifest link (%d) at revision %d") % (l, i))
2033 2033
2034 2034 if n in neededmanifests:
2035 2035 del neededmanifests[n]
2036 2036
2037 2037 if n in seen:
2038 2038 err(_("duplicate manifest at revision %d") % i)
2039 2039
2040 2040 seen[n] = 1
2041 2041
2042 2042 for p in self.manifest.parents(n):
2043 2043 if p not in self.manifest.nodemap:
2044 2044 err(_("manifest %s has unknown parent %s") %
2045 2045 (short(n), short(p)))
2046 2046
2047 2047 try:
2048 2048 delta = mdiff.patchtext(self.manifest.delta(n))
2049 2049 except KeyboardInterrupt:
2050 2050 self.ui.warn(_("interrupted"))
2051 2051 raise
2052 2052 except Exception, inst:
2053 2053 err(_("unpacking manifest %s: %s") % (short(n), inst))
2054 2054 continue
2055 2055
2056 2056 try:
2057 2057 ff = [ l.split('\0') for l in delta.splitlines() ]
2058 2058 for f, fn in ff:
2059 2059 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
2060 2060 except (ValueError, TypeError), inst:
2061 2061 err(_("broken delta in manifest %s: %s") % (short(n), inst))
2062 2062
2063 2063 self.ui.status(_("crosschecking files in changesets and manifests\n"))
2064 2064
2065 2065 for m, c in neededmanifests.items():
2066 2066 err(_("Changeset %s refers to unknown manifest %s") %
2067 2067 (short(m), short(c)))
2068 2068 del neededmanifests
2069 2069
2070 2070 for f in filenodes:
2071 2071 if f not in filelinkrevs:
2072 2072 err(_("file %s in manifest but not in changesets") % f)
2073 2073
2074 2074 for f in filelinkrevs:
2075 2075 if f not in filenodes:
2076 2076 err(_("file %s in changeset but not in manifest") % f)
2077 2077
2078 2078 self.ui.status(_("checking files\n"))
2079 2079 ff = filenodes.keys()
2080 2080 ff.sort()
2081 2081 for f in ff:
2082 2082 if f == "/dev/null":
2083 2083 continue
2084 2084 files += 1
2085 2085 if not f:
2086 2086 err(_("file without name in manifest %s") % short(n))
2087 2087 continue
2088 2088 fl = self.file(f)
2089 2089 checkversion(fl, f)
2090 2090 checksize(fl, f)
2091 2091
2092 2092 nodes = {nullid: 1}
2093 2093 seen = {}
2094 2094 for i in range(fl.count()):
2095 2095 revisions += 1
2096 2096 n = fl.node(i)
2097 2097
2098 2098 if n in seen:
2099 2099 err(_("%s: duplicate revision %d") % (f, i))
2100 2100 if n not in filenodes[f]:
2101 2101 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
2102 2102 else:
2103 2103 del filenodes[f][n]
2104 2104
2105 2105 flr = fl.linkrev(n)
2106 2106 if flr not in filelinkrevs.get(f, []):
2107 2107 err(_("%s:%s points to unexpected changeset %d")
2108 2108 % (f, short(n), flr))
2109 2109 else:
2110 2110 filelinkrevs[f].remove(flr)
2111 2111
2112 2112 # verify contents
2113 2113 try:
2114 2114 t = fl.read(n)
2115 2115 except KeyboardInterrupt:
2116 2116 self.ui.warn(_("interrupted"))
2117 2117 raise
2118 2118 except Exception, inst:
2119 2119 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
2120 2120
2121 2121 # verify parents
2122 2122 (p1, p2) = fl.parents(n)
2123 2123 if p1 not in nodes:
2124 2124 err(_("file %s:%s unknown parent 1 %s") %
2125 2125 (f, short(n), short(p1)))
2126 2126 if p2 not in nodes:
2127 2127 err(_("file %s:%s unknown parent 2 %s") %
2128 2128 (f, short(n), short(p1)))
2129 2129 nodes[n] = 1
2130 2130
2131 2131 # cross-check
2132 2132 for node in filenodes[f]:
2133 2133 err(_("node %s in manifests not in %s") % (hex(node), f))
2134 2134
2135 2135 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
2136 2136 (files, changesets, revisions))
2137 2137
2138 2138 if warnings[0]:
2139 2139 self.ui.warn(_("%d warnings encountered!\n") % warnings[0])
2140 2140 if errors[0]:
2141 2141 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
2142 2142 return 1
2143 2143
# used to avoid circular references so destructors work
def aftertrans(base):
    """Return a callback that renames the transaction journal to 'undo'.

    Only the repository path string is captured by the closure (not the
    repository object itself), so the transaction holding the callback
    does not create a reference cycle back to the repository.
    """
    path = base
    def renamefiles():
        util.rename(os.path.join(path, "journal"),
                    os.path.join(path, "undo"))
        util.rename(os.path.join(path, "journal.dirstate"),
                    os.path.join(path, "undo.dirstate"))
    return renamefiles
2152 2152
General Comments 0
You need to be logged in to leave comments. Login now