##// END OF EJS Templates
Allow repo lock to be passed in to localrepo.commit for performance
mason@suse.com -
r1807:f1f43ea2 default
parent child Browse files
Show More
@@ -1,1896 +1,1897 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import struct, os, util
9 9 import filelog, manifest, changelog, dirstate, repo
10 10 from node import *
11 11 from i18n import gettext as _
12 12 from demandload import *
13 13 demandload(globals(), "re lock transaction tempfile stat mdiff errno")
14 14
15 15 class localrepository(object):
16 16 def __del__(self):
17 17 self.transhandle = None
    def __init__(self, ui, path=None, create=0):
        """Open (or, when create is true, create) the repository at path.

        If path is None, walk upward from the current directory until a
        directory containing ".hg" is found.  Raises repo.RepoError when
        no repository can be located, or when path has no repository and
        create is false.
        """
        if not path:
            # search upwards for a .hg directory; at the filesystem
            # root dirname() is a fixed point, which ends the loop
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    raise repo.RepoError(_("no repo found"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise repo.RepoError(_("repository %s not found") % path)

        self.root = os.path.abspath(path)
        self.ui = ui
        # opener works relative to .hg/, wopener relative to the working dir
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.manifest = manifest.manifest(self.opener)
        self.changelog = changelog.changelog(self.opener)
        # lazily-built caches; see tags(), nodetags(), wread(), wwrite()
        self.tagscache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        # currently running transaction, if any; see transaction()
        self.transhandle = None

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.dirstate = dirstate.dirstate(self.opener, ui, self.root)
        try:
            # per-repository configuration is optional
            self.ui.readconfig(self.join("hgrc"))
        except IOError:
            pass
53 53
54 54 def hook(self, name, throw=False, **args):
55 55 def runhook(name, cmd):
56 56 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
57 57 old = {}
58 58 for k, v in args.items():
59 59 k = k.upper()
60 60 old['HG_' + k] = os.environ.get(k, None)
61 61 old[k] = os.environ.get(k, None)
62 62 os.environ['HG_' + k] = str(v)
63 63 os.environ[k] = str(v)
64 64
65 65 try:
66 66 # Hooks run in the repository root
67 67 olddir = os.getcwd()
68 68 os.chdir(self.root)
69 69 r = os.system(cmd)
70 70 finally:
71 71 for k, v in old.items():
72 72 if v is not None:
73 73 os.environ[k] = v
74 74 else:
75 75 del os.environ[k]
76 76
77 77 os.chdir(olddir)
78 78
79 79 if r:
80 80 desc, r = util.explain_exit(r)
81 81 if throw:
82 82 raise util.Abort(_('%s hook %s') % (name, desc))
83 83 self.ui.warn(_('error: %s hook %s\n') % (name, desc))
84 84 return False
85 85 return True
86 86
87 87 r = True
88 88 for hname, cmd in self.ui.configitems("hooks"):
89 89 s = hname.split(".")
90 90 if s[0] == name and cmd:
91 91 r = runhook(hname, cmd) and r
92 92 return r
93 93
    def tags(self):
        '''return a mapping of tag to node'''
        if not self.tagscache:
            self.tagscache = {}
            def addtag(self, k, n):
                # record tag k -> binary node; malformed node ids map
                # to '' instead of raising
                try:
                    bin_n = bin(n)
                except TypeError:
                    bin_n = ''
                self.tagscache[k.strip()] = bin_n

            try:
                # read each head of the tags file, ending with the tip
                # and add each tag found to the map, with "newer" ones
                # taking precedence
                fl = self.file(".hgtags")
                h = fl.heads()
                h.reverse()
                for r in h:
                    for l in fl.read(r).splitlines():
                        if l:
                            n, k = l.split(" ", 1)
                            addtag(self, k, n)
            except KeyError:
                # no .hgtags file in this repository
                pass

            try:
                # local (uncommitted) tags override committed ones
                f = self.opener("localtags")
                for l in f:
                    n, k = l.split(" ", 1)
                    addtag(self, k, n)
            except IOError:
                pass

            # 'tip' is always present and always wins
            self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
131 131
132 132 def tagslist(self):
133 133 '''return a list of tags ordered by revision'''
134 134 l = []
135 135 for t, n in self.tags().items():
136 136 try:
137 137 r = self.changelog.rev(n)
138 138 except:
139 139 r = -2 # sort to the beginning of the list if unknown
140 140 l.append((r, t, n))
141 141 l.sort()
142 142 return [(t, n) for r, t, n in l]
143 143
144 144 def nodetags(self, node):
145 145 '''return the tags associated with a node'''
146 146 if not self.nodetagscache:
147 147 self.nodetagscache = {}
148 148 for t, n in self.tags().items():
149 149 self.nodetagscache.setdefault(n, []).append(t)
150 150 return self.nodetagscache.get(node, [])
151 151
152 152 def lookup(self, key):
153 153 try:
154 154 return self.tags()[key]
155 155 except KeyError:
156 156 try:
157 157 return self.changelog.lookup(key)
158 158 except:
159 159 raise repo.RepoError(_("unknown revision '%s'") % key)
160 160
161 161 def dev(self):
162 162 return os.stat(self.path).st_dev
163 163
164 164 def local(self):
165 165 return True
166 166
167 167 def join(self, f):
168 168 return os.path.join(self.path, f)
169 169
170 170 def wjoin(self, f):
171 171 return os.path.join(self.root, f)
172 172
173 173 def file(self, f):
174 174 if f[0] == '/':
175 175 f = f[1:]
176 176 return filelog.filelog(self.opener, f)
177 177
178 178 def getcwd(self):
179 179 return self.dirstate.getcwd()
180 180
181 181 def wfile(self, f, mode='r'):
182 182 return self.wopener(f, mode)
183 183
    def wread(self, filename):
        """Read filename from the working directory, applying the first
        [encode] filter whose pattern matches it."""
        if self.encodepats == None:
            # build the (matcher, command) list once and cache it
            l = []
            for pat, cmd in self.ui.configitems("encode"):
                mf = util.matcher("", "/", [pat], [], [])[1]
                l.append((mf, cmd))
            self.encodepats = l

        data = self.wopener(filename, 'r').read()

        for mf, cmd in self.encodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                # only the first matching filter is applied
                break

        return data
201 201
    def wwrite(self, filename, data, fd=None):
        """Write data to filename in the working directory, applying
        the first [decode] filter whose pattern matches.  When fd is
        given, write to it instead of opening the file."""
        if self.decodepats == None:
            # build the (matcher, command) list once and cache it
            l = []
            for pat, cmd in self.ui.configitems("decode"):
                mf = util.matcher("", "/", [pat], [], [])[1]
                l.append((mf, cmd))
            self.decodepats = l

        for mf, cmd in self.decodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                # only the first matching filter is applied
                break

        if fd:
            return fd.write(data)
        return self.wopener(filename, 'w').write(data)
219 219
    def transaction(self):
        """Return a transaction, nesting inside the running one if any."""
        tr = self.transhandle
        if tr != None and tr.running():
            # a transaction is already in progress: join it
            return tr.nest()

        # save dirstate for undo
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (e.g. fresh repository)
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        tr = transaction.transaction(self.ui.warn, self.opener,
                                     self.join("journal"),
                                     aftertrans(self.path))
        # remember it so nested calls reuse this transaction
        self.transhandle = tr
        return tr
237 237
    def recover(self):
        """Roll back an interrupted transaction, if one exists.

        Returns True when a journal was found and rolled back,
        False otherwise.
        """
        l = self.lock()
        if os.path.exists(self.join("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.opener, self.join("journal"))
            # in-memory state is stale after the rollback
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
248 248
    def undo(self, wlock=None):
        """Roll back the last completed transaction, restoring the
        saved dirstate as well.

        An already-held working-directory lock may be passed in via
        wlock to avoid re-acquiring it.
        """
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.join("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.opener, self.join("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            # both repository data and the dirstate changed on disk
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no undo information available\n"))
261 261
262 262 def wreload(self):
263 263 self.dirstate.read()
264 264
265 265 def reload(self):
266 266 self.changelog.load()
267 267 self.manifest.load()
268 268 self.tagscache = None
269 269 self.nodetagscache = None
270 270
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None):
        """Acquire the named lock file under .hg.

        When the lock is held by someone else and wait is false, the
        LockHeld exception propagates; otherwise we retry with a
        timeout taken from ui.timeout (600 seconds by default).
        acquirefn, when given, runs after the lock is obtained;
        releasefn is passed through to the lock object.
        """
        try:
            l = lock.lock(self.join(lockname), 0, releasefn)
        except lock.LockHeld, inst:
            if not wait:
                raise inst
            self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
            try:
                # default to 600 seconds timeout
                l = lock.lock(self.join(lockname),
                              int(self.ui.config("ui", "timeout") or 600),
                              releasefn)
            except lock.LockHeld, inst:
                raise util.Abort(_("timeout while waiting for "
                                   "lock held by %s") % inst.args[0])
        if acquirefn:
            acquirefn()
        return l
289 289
290 290 def lock(self, wait=1):
291 291 return self.do_lock("lock", wait, acquirefn=self.reload)
292 292
293 293 def wlock(self, wait=1):
294 294 return self.do_lock("wlock", wait,
295 295 self.dirstate.write,
296 296 self.wreload)
297 297
    def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
        "determine whether a new filenode is needed"
        # filenodes of the file in each parent manifest
        fp1 = manifest1.get(filename, nullid)
        fp2 = manifest2.get(filename, nullid)

        if fp2 != nullid:
            # is one parent an ancestor of the other?  if so, no real
            # merge is needed for this file: keep the descendant as fp1
            fpa = filelog.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and text == filelog.read(fp1):
            return (fp1, None, None)

        # a new filenode is needed, with these parents
        return (None, fp1, fp2)
316 316
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        """Commit `files` directly with explicit parents, bypassing the
        usual status checks.

        Files that cannot be read from the working directory are
        dropped from the manifest.  The dirstate is only updated when
        p1 matches the current working-directory parent.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        # only touch the dirstate when committing on top of the
        # working directory's current parent
        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                # reuse the existing filenode when no new one is needed
                (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    mm[f] = entry
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # unreadable in the working dir: treat as removed
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
372 372
373 373 def commit(self, files=None, text="", user=None, date=None,
374 match=util.always, force=False, wlock=None):
374 match=util.always, force=False, lock=None, wlock=None):
375 375 commit = []
376 376 remove = []
377 377 changed = []
378 378
379 379 if files:
380 380 for f in files:
381 381 s = self.dirstate.state(f)
382 382 if s in 'nmai':
383 383 commit.append(f)
384 384 elif s == 'r':
385 385 remove.append(f)
386 386 else:
387 387 self.ui.warn(_("%s not tracked!\n") % f)
388 388 else:
389 389 modified, added, removed, deleted, unknown = self.changes(match=match)
390 390 commit = modified + added
391 391 remove = removed
392 392
393 393 p1, p2 = self.dirstate.parents()
394 394 c1 = self.changelog.read(p1)
395 395 c2 = self.changelog.read(p2)
396 396 m1 = self.manifest.read(c1[0])
397 397 mf1 = self.manifest.readflags(c1[0])
398 398 m2 = self.manifest.read(c2[0])
399 399
400 400 if not commit and not remove and not force and p2 == nullid:
401 401 self.ui.status(_("nothing changed\n"))
402 402 return None
403 403
404 404 xp1 = hex(p1)
405 405 if p2 == nullid: xp2 = ''
406 406 else: xp2 = hex(p2)
407 407
408 408 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
409 409
410 410 if not wlock:
411 411 wlock = self.wlock()
412 l = self.lock()
412 if not lock:
413 lock = self.lock()
413 414 tr = self.transaction()
414 415
415 416 # check in files
416 417 new = {}
417 418 linkrev = self.changelog.count()
418 419 commit.sort()
419 420 for f in commit:
420 421 self.ui.note(f + "\n")
421 422 try:
422 423 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
423 424 t = self.wread(f)
424 425 except IOError:
425 426 self.ui.warn(_("trouble committing %s!\n") % f)
426 427 raise
427 428
428 429 r = self.file(f)
429 430
430 431 meta = {}
431 432 cp = self.dirstate.copied(f)
432 433 if cp:
433 434 meta["copy"] = cp
434 435 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
435 436 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
436 437 fp1, fp2 = nullid, nullid
437 438 else:
438 439 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
439 440 if entry:
440 441 new[f] = entry
441 442 continue
442 443
443 444 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
444 445 # remember what we've added so that we can later calculate
445 446 # the files to pull from a set of changesets
446 447 changed.append(f)
447 448
448 449 # update manifest
449 450 m1 = m1.copy()
450 451 m1.update(new)
451 452 for f in remove:
452 453 if f in m1:
453 454 del m1[f]
454 455 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
455 456 (new, remove))
456 457
457 458 # add changeset
458 459 new = new.keys()
459 460 new.sort()
460 461
461 462 if not text:
462 463 edittext = [""]
463 464 if p2 != nullid:
464 465 edittext.append("HG: branch merge")
465 466 edittext.extend(["HG: changed %s" % f for f in changed])
466 467 edittext.extend(["HG: removed %s" % f for f in remove])
467 468 if not changed and not remove:
468 469 edittext.append("HG: no files changed")
469 470 edittext.append("")
470 471 # run editor in the repository root
471 472 olddir = os.getcwd()
472 473 os.chdir(self.root)
473 474 edittext = self.ui.edit("\n".join(edittext))
474 475 os.chdir(olddir)
475 476 if not edittext.rstrip():
476 477 return None
477 478 text = edittext
478 479
479 480 user = user or self.ui.username()
480 481 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
481 482 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
482 483 parent2=xp2)
483 484 tr.close()
484 485
485 486 self.dirstate.setparents(n)
486 487 self.dirstate.update(new, "n")
487 488 self.dirstate.forget(remove)
488 489
489 490 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
490 491 return n
491 492
492 493 def walk(self, node=None, files=[], match=util.always):
493 494 if node:
494 495 fdict = dict.fromkeys(files)
495 496 for fn in self.manifest.read(self.changelog.read(node)[0]):
496 497 fdict.pop(fn, None)
497 498 if match(fn):
498 499 yield 'm', fn
499 500 for fn in fdict:
500 501 self.ui.warn(_('%s: No such file in rev %s\n') % (
501 502 util.pathto(self.getcwd(), fn), short(node)))
502 503 else:
503 504 for src, fn in self.dirstate.walk(files, match):
504 505 yield src, fn
505 506
    def changes(self, node1=None, node2=None, files=[], match=util.always,
                wlock=None):
        """return changes between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a tuple (modified, added, removed, deleted, unknown),
        each list sorted.
        """

        def fcmp(fn, mf):
            # full content comparison of working file vs. manifest entry
            t1 = self.wread(fn)
            t2 = self.file(fn).read(mf.get(fn, nullid))
            return cmp(t1, t2)

        def mfmatches(node):
            # manifest of `node`, restricted to files accepted by match
            change = self.changelog.read(node)
            mf = dict(self.manifest.read(change[0]))
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if node1:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    # cannot lock: still report, but leave dirstate alone
                    wlock = None
            lookup, modified, added, removed, deleted, unknown = (
                self.dirstate.changes(files, match))

            # are we comparing working dir against its parent?
            if not node1:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        elif wlock is not None:
                            # file turned out clean: record that in the
                            # dirstate, but only while holding the wlock
                            self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            deleted, unknown = [], []
            mf2 = mfmatches(node2)

        if node1:
            # flush lists from dirstate before comparing manifests
            modified, added = [], []

            for fn in mf2:
                if mf1.has_key(fn):
                    if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
                        modified.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever is left in mf1 is missing from node2
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown:
            l.sort()
        return (modified, added, removed, deleted, unknown)
585 586
586 587 def add(self, list, wlock=None):
587 588 if not wlock:
588 589 wlock = self.wlock()
589 590 for f in list:
590 591 p = self.wjoin(f)
591 592 if not os.path.exists(p):
592 593 self.ui.warn(_("%s does not exist!\n") % f)
593 594 elif not os.path.isfile(p):
594 595 self.ui.warn(_("%s not added: only files supported currently\n")
595 596 % f)
596 597 elif self.dirstate.state(f) in 'an':
597 598 self.ui.warn(_("%s already tracked!\n") % f)
598 599 else:
599 600 self.dirstate.update([f], "a")
600 601
601 602 def forget(self, list, wlock=None):
602 603 if not wlock:
603 604 wlock = self.wlock()
604 605 for f in list:
605 606 if self.dirstate.state(f) not in 'ai':
606 607 self.ui.warn(_("%s not added!\n") % f)
607 608 else:
608 609 self.dirstate.forget([f])
609 610
610 611 def remove(self, list, unlink=False, wlock=None):
611 612 if unlink:
612 613 for f in list:
613 614 try:
614 615 util.unlink(self.wjoin(f))
615 616 except OSError, inst:
616 617 if inst.errno != errno.ENOENT:
617 618 raise
618 619 if not wlock:
619 620 wlock = self.wlock()
620 621 for f in list:
621 622 p = self.wjoin(f)
622 623 if os.path.exists(p):
623 624 self.ui.warn(_("%s still exists!\n") % f)
624 625 elif self.dirstate.state(f) == 'a':
625 626 self.dirstate.forget([f])
626 627 elif f not in self.dirstate:
627 628 self.ui.warn(_("%s not tracked!\n") % f)
628 629 else:
629 630 self.dirstate.update([f], "r")
630 631
631 632 def undelete(self, list, wlock=None):
632 633 p = self.dirstate.parents()[0]
633 634 mn = self.changelog.read(p)[0]
634 635 mf = self.manifest.readflags(mn)
635 636 m = self.manifest.read(mn)
636 637 if not wlock:
637 638 wlock = self.wlock()
638 639 for f in list:
639 640 if self.dirstate.state(f) not in "r":
640 641 self.ui.warn("%s not removed!\n" % f)
641 642 else:
642 643 t = self.file(f).read(m[f])
643 644 self.wwrite(f, t)
644 645 util.set_exec(self.wjoin(f), mf[f])
645 646 self.dirstate.update([f], "n")
646 647
647 648 def copy(self, source, dest, wlock=None):
648 649 p = self.wjoin(dest)
649 650 if not os.path.exists(p):
650 651 self.ui.warn(_("%s does not exist!\n") % dest)
651 652 elif not os.path.isfile(p):
652 653 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
653 654 else:
654 655 if not wlock:
655 656 wlock = self.wlock()
656 657 if self.dirstate.state(dest) == '?':
657 658 self.dirstate.update([dest], "a")
658 659 self.dirstate.copy(source, dest)
659 660
660 661 def heads(self, start=None):
661 662 heads = self.changelog.heads(start)
662 663 # sort the output in rev descending order
663 664 heads = [(-self.changelog.rev(h), h) for h in heads]
664 665 heads.sort()
665 666 return [n for (r, n) in heads]
666 667
    # branchlookup returns a dict giving a list of branches for
    # each head. A branch is defined as the tag of a node or
    # the branch of the node's parents. If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph: a->b->c->d->e
    #                      \         /
    #                       aa -----/
    # a has tag 2.6.12
    # d has tag 2.6.13
    # e would have branch tags for 2.6.12 and 2.6.13.  Because the node
    # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
    # from the list.
    #
    # It is possible that more than one head will have the same branch tag.
    # callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (ie checkout of a specific
    # branch).
    #
    # passing in a specific branch will limit the depth of the search
    # through the parents.  It won't limit the branches returned in the
    # result though.
    def branchlookup(self, heads=None, branch=None):
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume a deferred second-parent traversal
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        # record this tagged node as visible from every
                        # node found so far on this traversal
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    # reached the branch we were limited to: stop here
                    if branch in tags:
                        continue
                seen[n] = 1
                # defer the second parent of a merge for a later pass
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # set of tagged nodes reachable from `node`, memoized
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited.  This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
772 773
773 774 def branches(self, nodes):
774 775 if not nodes:
775 776 nodes = [self.changelog.tip()]
776 777 b = []
777 778 for n in nodes:
778 779 t = n
779 780 while n:
780 781 p = self.changelog.parents(n)
781 782 if p[1] != nullid or p[0] == nullid:
782 783 b.append((t, n, p[0], p[1]))
783 784 break
784 785 n = p[0]
785 786 return b
786 787
787 788 def between(self, pairs):
788 789 r = []
789 790
790 791 for top, bottom in pairs:
791 792 n, l, i = top, [], 0
792 793 f = 1
793 794
794 795 while n != bottom:
795 796 p = self.changelog.parents(n)[0]
796 797 if i == f:
797 798 l.append(n)
798 799 f = f * 2
799 800 n = p
800 801 i += 1
801 802
802 803 r.append(l)
803 804
804 805 return r
805 806
    def findincoming(self, remote, base=None, heads=None):
        """Return the earliest changesets present on remote but not here.

        When `base` is passed as a dict it is filled in with the latest
        nodes known to both sides.  `heads` may be supplied to avoid
        asking the remote for its heads.  Returns None when the remote
        has nothing we lack.
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        if not heads:
            heads = remote.heads()

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            # every remote head is already known locally
            return None

        rep = {}
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid:
                    break
                if n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                if n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            base[n[2]] = 1 # latest known
                            continue

                    # queue the branch's parents for the next request
                    for a in n[2:4]:
                        if a not in rep:
                            r.append(a)
                            rep[a] = 1

                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                # ask about the queued parents in batches of 10
                for p in range(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        if b[0] in m:
                            self.ui.debug(_("found base node %s\n")
                                          % short(b[0]))
                            base[b[0]] = 1
                        elif b[0] not in seen:
                            unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            self.ui.warn(_("warning: pulling from an unrelated repository!\n"))

        self.ui.note(_("found new changesets starting at ") +
                     " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
927 928
    def findoutgoing(self, remote, base=None, heads=None):
        """Return the roots of the set of changesets missing from remote.

        `base`, when supplied, is a dict of common heads (as filled in
        by findincoming); when None it is computed here first.
        """
        if base == None:
            base = {}
            self.findincoming(remote, base, heads)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)

        # this is the set of all roots we have to push
        return subset
957 958
    def pull(self, remote, heads=None):
        """Pull missing changesets from remote.

        Returns 1 when there is nothing to pull, otherwise the result
        of addchangegroup.
        """
        l = self.lock()

        # if we have an empty repo, fetch everything
        if self.changelog.tip() == nullid:
            self.ui.status(_("requesting all changes\n"))
            fetch = [nullid]
        else:
            fetch = self.findincoming(remote)

        if not fetch:
            self.ui.status(_("no changes found\n"))
            return 1

        # limit the pull to the requested heads when given
        if heads is None:
            cg = remote.changegroup(fetch, 'pull')
        else:
            cg = remote.changegroupsubset(fetch, heads, 'pull')
        return self.addchangegroup(cg)
977 978
    def push(self, remote, force=False, revs=None):
        """Push local changesets (optionally limited to revs) to remote.

        Refuses with return value 1 when the remote has unsynced
        changes or when the push would create new remote heads, unless
        force is set.  Otherwise returns the remote's addchangegroup
        result.
        """
        lock = remote.lock()

        base = {}
        heads = remote.heads()
        inc = self.findincoming(remote, base, heads)
        if not force and inc:
            self.ui.warn(_("abort: unsynced remote changes!\n"))
            self.ui.status(_("(did you forget to sync? use push -f to force)\n"))
            return 1

        update = self.findoutgoing(remote, base)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return 1
        elif not force:
            # pushing more heads than the remote has would create new
            # remote branches
            if len(bases) < len(heads):
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return 1

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return remote.addchangegroup(cg)
1010 1011
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        bases and heads are lists of changelog node ids; source is an
        identifier string passed through to the preoutgoing/outgoing
        hooks.  Returns a util.chunkbuffer wrapping a generator that
        yields the raw changegroup chunks."""

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                    else:
                        # Otherwise we need a full manifest.
                        m = mnfst.read(mnfstnode)
                        # For every file we care about.
                        for f in changedfiles:
                            fnode = m.get(f, None)
                            # If it's in the manifest
                            if fnode is not None:
                                # See comments above.
                                clnode = msng_mnfst_set[mnfstnode]
                                ndset = msng_filenode_set.setdefault(f, {})
                                ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, let's remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield struct.pack(">l", len(fname) + 4) + fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield struct.pack(">l", 0)

            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1280 1281
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        basenodes is a list of changelog node ids the recipient already
        has; source is an identifier string passed through to the
        preoutgoing/outgoing hooks.  Returns a util.chunkbuffer wrapping
        a generator of raw changegroup chunks."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # every changeset descended from one of the bases is outgoing
        nodes = cl.nodesbetween(basenodes, None)[0]
        # ersatz set of the outgoing changelog revision numbers
        revset = dict.fromkeys([cl.rev(n) for n in nodes])

        # a changeset 'owns' itself, so its lookup function is identity
        def identity(x):
            return x

        def gennodelst(revlog):
            # yield the nodes of revlog whose linked changeset is outgoing
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # record every file touched by an outgoing changeset
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # map a revlog node back to the changeset node that owns it
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    # each file group is preceded by a chunk with its name
                    yield struct.pack(">l", len(fname) + 4) + fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # a zero-length chunk marks the end of the stream
            yield struct.pack(">l", 0)
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1344 1345
1345 1346 def addchangegroup(self, source):
1346 1347
1347 1348 def getchunk():
1348 1349 d = source.read(4)
1349 1350 if not d:
1350 1351 return ""
1351 1352 l = struct.unpack(">l", d)[0]
1352 1353 if l <= 4:
1353 1354 return ""
1354 1355 d = source.read(l - 4)
1355 1356 if len(d) < l - 4:
1356 1357 raise repo.RepoError(_("premature EOF reading chunk"
1357 1358 " (got %d bytes, expected %d)")
1358 1359 % (len(d), l - 4))
1359 1360 return d
1360 1361
1361 1362 def getgroup():
1362 1363 while 1:
1363 1364 c = getchunk()
1364 1365 if not c:
1365 1366 break
1366 1367 yield c
1367 1368
1368 1369 def csmap(x):
1369 1370 self.ui.debug(_("add changeset %s\n") % short(x))
1370 1371 return self.changelog.count()
1371 1372
1372 1373 def revmap(x):
1373 1374 return self.changelog.rev(x)
1374 1375
1375 1376 if not source:
1376 1377 return
1377 1378
1378 1379 self.hook('prechangegroup', throw=True)
1379 1380
1380 1381 changesets = files = revisions = 0
1381 1382
1382 1383 tr = self.transaction()
1383 1384
1384 1385 oldheads = len(self.changelog.heads())
1385 1386
1386 1387 # pull off the changeset group
1387 1388 self.ui.status(_("adding changesets\n"))
1388 1389 co = self.changelog.tip()
1389 1390 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
1390 1391 cnr, cor = map(self.changelog.rev, (cn, co))
1391 1392 if cn == nullid:
1392 1393 cnr = cor
1393 1394 changesets = cnr - cor
1394 1395
1395 1396 # pull off the manifest group
1396 1397 self.ui.status(_("adding manifests\n"))
1397 1398 mm = self.manifest.tip()
1398 1399 mo = self.manifest.addgroup(getgroup(), revmap, tr)
1399 1400
1400 1401 # process the files
1401 1402 self.ui.status(_("adding file changes\n"))
1402 1403 while 1:
1403 1404 f = getchunk()
1404 1405 if not f:
1405 1406 break
1406 1407 self.ui.debug(_("adding %s revisions\n") % f)
1407 1408 fl = self.file(f)
1408 1409 o = fl.count()
1409 1410 n = fl.addgroup(getgroup(), revmap, tr)
1410 1411 revisions += fl.count() - o
1411 1412 files += 1
1412 1413
1413 1414 newheads = len(self.changelog.heads())
1414 1415 heads = ""
1415 1416 if oldheads and newheads > oldheads:
1416 1417 heads = _(" (+%d heads)") % (newheads - oldheads)
1417 1418
1418 1419 self.ui.status(_("added %d changesets"
1419 1420 " with %d changes to %d files%s\n")
1420 1421 % (changesets, revisions, files, heads))
1421 1422
1422 1423 self.hook('pretxnchangegroup', throw=True,
1423 1424 node=hex(self.changelog.node(cor+1)))
1424 1425
1425 1426 tr.close()
1426 1427
1427 1428 if changesets > 0:
1428 1429 self.hook("changegroup", node=hex(self.changelog.node(cor+1)))
1429 1430
1430 1431 for i in range(cor + 1, cnr + 1):
1431 1432 self.hook("incoming", node=hex(self.changelog.node(i)))
1432 1433
    def update(self, node, allow=False, force=False, choose=None,
               moddirstate=True, forcemerge=False, wlock=None):
        """Update the working directory to the changeset given by node.

        allow permits a cross-branch merge; force discards conflicting
        local state; choose, if given, is a predicate limiting which
        files are touched; moddirstate controls whether the dirstate is
        updated; forcemerge merges even with outstanding changes; wlock
        is an already-held working-dir lock (one is taken if needed).
        Returns 1 on refusal, otherwise True if any file merge failed,
        False on full success.
        """
        pl = self.dirstate.parents()
        if not force and pl[1] != nullid:
            self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
            return 1

        err = False

        # p1 is the current first parent, p2 the update target; pa is
        # their common ancestor in the changelog
        p1, p2 = pl[0], node
        pa = self.changelog.ancestor(p1, p2)
        m1n = self.changelog.read(p1)[0]
        m2n = self.changelog.read(p2)[0]
        man = self.manifest.ancestor(m1n, m2n)
        m1 = self.manifest.read(m1n)
        mf1 = self.manifest.readflags(m1n)
        m2 = self.manifest.read(m2n).copy()
        mf2 = self.manifest.readflags(m2n)
        ma = self.manifest.read(man)
        mfa = self.manifest.readflags(man)

        modified, added, removed, deleted, unknown = self.changes()

        # is this a jump, or a merge? i.e. is there a linear path
        # from p1 to p2?
        linear_path = (pa == p1 or pa == p2)

        if allow and linear_path:
            raise util.Abort(_("there is nothing to merge, "
                               "just use 'hg update'"))
        if allow and not forcemerge:
            if modified or added or removed:
                raise util.Abort(_("outstanding uncommited changes"))
        if not forcemerge and not force:
            for f in unknown:
                if f in m2:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) != 0:
                        raise util.Abort(_("'%s' already exists in the working"
                                           " dir and differs from remote") % f)

        # resolve the manifest to determine which files
        # we care about merging
        self.ui.note(_("resolving manifests\n"))
        self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
                      (force, allow, moddirstate, linear_path))
        self.ui.debug(_(" ancestor %s local %s remote %s\n") %
                      (short(man), short(m1n), short(m2n)))

        # merge: files needing a 3-way merge; get: files to fetch from
        # the target; remove: files to delete from the working dir
        merge = {}
        get = {}
        remove = []

        # construct a working dir manifest
        mw = m1.copy()
        mfw = mf1.copy()
        umap = dict.fromkeys(unknown)

        for f in added + modified + unknown:
            mw[f] = ""
            mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

        if moddirstate and not wlock:
            wlock = self.wlock()

        for f in deleted + removed:
            if f in mw:
                del mw[f]

            # If we're jumping between revisions (as opposed to merging),
            # and if neither the working directory nor the target rev has
            # the file, then we need to remove it from the dirstate, to
            # prevent the dirstate from listing the file when it is no
            # longer in the manifest.
            if moddirstate and linear_path and f not in m2:
                self.dirstate.forget((f,))

        # Compare manifests
        for f, n in mw.iteritems():
            if choose and not choose(f):
                continue
            if f in m2:
                s = 0

                # is the wfile new since m1, and match m2?
                if f not in m1:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) == 0:
                        n = m2[f]
                    del t1, t2

                # are files different?
                if n != m2[f]:
                    a = ma.get(f, nullid)
                    # are both different from the ancestor?
                    if n != a and m2[f] != a:
                        self.ui.debug(_(" %s versions differ, resolve\n") % f)
                        # merge executable bits
                        # "if we changed or they changed, change in merge"
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        merge[f] = (m1.get(f, nullid), m2[f], mode)
                        s = 1
                    # are we clobbering?
                    # is remote's version newer?
                    # or are we going back in time?
                    elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                        self.ui.debug(_(" remote %s is newer, get\n") % f)
                        get[f] = m2[f]
                        s = 1
                elif f in umap:
                    # this unknown file is the same as the checkout
                    get[f] = m2[f]

                if not s and mfw[f] != mf2[f]:
                    if force:
                        self.ui.debug(_(" updating permissions for %s\n") % f)
                        util.set_exec(self.wjoin(f), mf2[f])
                    else:
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        if mode != b:
                            self.ui.debug(_(" updating permissions for %s\n")
                                          % f)
                            util.set_exec(self.wjoin(f), mode)
                del m2[f]
            elif f in ma:
                if n != ma[f]:
                    r = _("d")
                    if not force and (linear_path or allow):
                        r = self.ui.prompt(
                            (_(" local changed %s which remote deleted\n") % f) +
                            _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                    if r == _("d"):
                        remove.append(f)
                else:
                    self.ui.debug(_("other deleted %s\n") % f)
                    remove.append(f) # other deleted it
            else:
                # file is created on branch or in working directory
                if force and f not in umap:
                    self.ui.debug(_("remote deleted %s, clobbering\n") % f)
                    remove.append(f)
                elif n == m1.get(f, nullid): # same as parent
                    if p2 == pa: # going backwards?
                        self.ui.debug(_("remote deleted %s\n") % f)
                        remove.append(f)
                    else:
                        self.ui.debug(_("local modified %s, keeping\n") % f)
                else:
                    self.ui.debug(_("working dir created %s, keeping\n") % f)

        # files remaining in m2 exist only on the target side
        for f, n in m2.iteritems():
            if choose and not choose(f):
                continue
            if f[0] == "/":
                continue
            if f in ma and n != ma[f]:
                r = _("k")
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        (_("remote changed %s which local deleted\n") % f) +
                        _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                if r == _("k"):
                    get[f] = n
            elif f not in ma:
                self.ui.debug(_("remote created %s\n") % f)
                get[f] = n
            else:
                if force or p2 == pa: # going backwards?
                    self.ui.debug(_("local deleted %s, recreating\n") % f)
                    get[f] = n
                else:
                    self.ui.debug(_("local deleted %s\n") % f)

        del mw, m1, m2, ma

        if force:
            for f in merge:
                get[f] = merge[f][1]
            merge = {}

        if linear_path or force:
            # we don't need to do any magic, just jump to the new rev
            branch_merge = False
            p1, p2 = p2, nullid
        else:
            if not allow:
                self.ui.status(_("this update spans a branch"
                                 " affecting the following files:\n"))
                fl = merge.keys() + get.keys()
                fl.sort()
                for f in fl:
                    cf = ""
                    if f in merge:
                        cf = _(" (resolve)")
                    self.ui.status(" %s%s\n" % (f, cf))
                self.ui.warn(_("aborting update spanning branches!\n"))
                self.ui.status(_("(use update -m to merge across branches"
                                 " or -C to lose changes)\n"))
                return 1
            branch_merge = True

        # get the files we don't need to change
        files = get.keys()
        files.sort()
        for f in files:
            if f[0] == "/":
                continue
            self.ui.note(_("getting %s\n") % f)
            t = self.file(f).read(get[f])
            self.wwrite(f, t)
            util.set_exec(self.wjoin(f), mf2[f])
            if moddirstate:
                if branch_merge:
                    self.dirstate.update([f], 'n', st_mtime=-1)
                else:
                    self.dirstate.update([f], 'n')

        # merge the tricky bits
        files = merge.keys()
        files.sort()
        for f in files:
            self.ui.status(_("merging %s\n") % f)
            my, other, flag = merge[f]
            ret = self.merge3(f, my, other)
            if ret:
                err = True
            util.set_exec(self.wjoin(f), flag)
            if moddirstate:
                if branch_merge:
                    # We've done a branch merge, mark this file as merged
                    # so that we properly record the merger later
                    self.dirstate.update([f], 'm')
                else:
                    # We've update-merged a locally modified file, so
                    # we set the dirstate to emulate a normal checkout
                    # of that file some time in the past. Thus our
                    # merge will appear as a normal local file
                    # modification.
                    f_len = len(self.file(f).read(other))
                    self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)

        remove.sort()
        for f in remove:
            self.ui.note(_("removing %s\n") % f)
            try:
                util.unlink(self.wjoin(f))
            except OSError, inst:
                if inst.errno != errno.ENOENT:
                    self.ui.warn(_("update failed to remove %s: %s!\n") %
                                 (f, inst.strerror))
        if moddirstate:
            if branch_merge:
                self.dirstate.update(remove, 'r')
            else:
                self.dirstate.forget(remove)

        if moddirstate:
            self.dirstate.setparents(p1, p2)
        return err
1696 1697
1697 1698 def merge3(self, fn, my, other):
1698 1699 """perform a 3-way merge in the working directory"""
1699 1700
1700 1701 def temp(prefix, node):
1701 1702 pre = "%s~%s." % (os.path.basename(fn), prefix)
1702 1703 (fd, name) = tempfile.mkstemp("", pre)
1703 1704 f = os.fdopen(fd, "wb")
1704 1705 self.wwrite(fn, fl.read(node), f)
1705 1706 f.close()
1706 1707 return name
1707 1708
1708 1709 fl = self.file(fn)
1709 1710 base = fl.ancestor(my, other)
1710 1711 a = self.wjoin(fn)
1711 1712 b = temp("base", base)
1712 1713 c = temp("other", other)
1713 1714
1714 1715 self.ui.note(_("resolving %s\n") % fn)
1715 1716 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1716 1717 (fn, short(my), short(other), short(base)))
1717 1718
1718 1719 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1719 1720 or "hgmerge")
1720 1721 r = os.system('%s "%s" "%s" "%s"' % (cmd, a, b, c))
1721 1722 if r:
1722 1723 self.ui.warn(_("merging %s failed!\n") % fn)
1723 1724
1724 1725 os.unlink(b)
1725 1726 os.unlink(c)
1726 1727 return r
1727 1728
1728 1729 def verify(self):
1729 1730 filelinkrevs = {}
1730 1731 filenodes = {}
1731 1732 changesets = revisions = files = 0
1732 1733 errors = [0]
1733 1734 neededmanifests = {}
1734 1735
1735 1736 def err(msg):
1736 1737 self.ui.warn(msg + "\n")
1737 1738 errors[0] += 1
1738 1739
1739 1740 def checksize(obj, name):
1740 1741 d = obj.checksize()
1741 1742 if d[0]:
1742 1743 err(_("%s data length off by %d bytes") % (name, d[0]))
1743 1744 if d[1]:
1744 1745 err(_("%s index contains %d extra bytes") % (name, d[1]))
1745 1746
1746 1747 seen = {}
1747 1748 self.ui.status(_("checking changesets\n"))
1748 1749 checksize(self.changelog, "changelog")
1749 1750
1750 1751 for i in range(self.changelog.count()):
1751 1752 changesets += 1
1752 1753 n = self.changelog.node(i)
1753 1754 l = self.changelog.linkrev(n)
1754 1755 if l != i:
1755 1756 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1756 1757 if n in seen:
1757 1758 err(_("duplicate changeset at revision %d") % i)
1758 1759 seen[n] = 1
1759 1760
1760 1761 for p in self.changelog.parents(n):
1761 1762 if p not in self.changelog.nodemap:
1762 1763 err(_("changeset %s has unknown parent %s") %
1763 1764 (short(n), short(p)))
1764 1765 try:
1765 1766 changes = self.changelog.read(n)
1766 1767 except KeyboardInterrupt:
1767 1768 self.ui.warn(_("interrupted"))
1768 1769 raise
1769 1770 except Exception, inst:
1770 1771 err(_("unpacking changeset %s: %s") % (short(n), inst))
1771 1772
1772 1773 neededmanifests[changes[0]] = n
1773 1774
1774 1775 for f in changes[3]:
1775 1776 filelinkrevs.setdefault(f, []).append(i)
1776 1777
1777 1778 seen = {}
1778 1779 self.ui.status(_("checking manifests\n"))
1779 1780 checksize(self.manifest, "manifest")
1780 1781
1781 1782 for i in range(self.manifest.count()):
1782 1783 n = self.manifest.node(i)
1783 1784 l = self.manifest.linkrev(n)
1784 1785
1785 1786 if l < 0 or l >= self.changelog.count():
1786 1787 err(_("bad manifest link (%d) at revision %d") % (l, i))
1787 1788
1788 1789 if n in neededmanifests:
1789 1790 del neededmanifests[n]
1790 1791
1791 1792 if n in seen:
1792 1793 err(_("duplicate manifest at revision %d") % i)
1793 1794
1794 1795 seen[n] = 1
1795 1796
1796 1797 for p in self.manifest.parents(n):
1797 1798 if p not in self.manifest.nodemap:
1798 1799 err(_("manifest %s has unknown parent %s") %
1799 1800 (short(n), short(p)))
1800 1801
1801 1802 try:
1802 1803 delta = mdiff.patchtext(self.manifest.delta(n))
1803 1804 except KeyboardInterrupt:
1804 1805 self.ui.warn(_("interrupted"))
1805 1806 raise
1806 1807 except Exception, inst:
1807 1808 err(_("unpacking manifest %s: %s") % (short(n), inst))
1808 1809
1809 1810 ff = [ l.split('\0') for l in delta.splitlines() ]
1810 1811 for f, fn in ff:
1811 1812 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1812 1813
1813 1814 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1814 1815
1815 1816 for m, c in neededmanifests.items():
1816 1817 err(_("Changeset %s refers to unknown manifest %s") %
1817 1818 (short(m), short(c)))
1818 1819 del neededmanifests
1819 1820
1820 1821 for f in filenodes:
1821 1822 if f not in filelinkrevs:
1822 1823 err(_("file %s in manifest but not in changesets") % f)
1823 1824
1824 1825 for f in filelinkrevs:
1825 1826 if f not in filenodes:
1826 1827 err(_("file %s in changeset but not in manifest") % f)
1827 1828
1828 1829 self.ui.status(_("checking files\n"))
1829 1830 ff = filenodes.keys()
1830 1831 ff.sort()
1831 1832 for f in ff:
1832 1833 if f == "/dev/null":
1833 1834 continue
1834 1835 files += 1
1835 1836 fl = self.file(f)
1836 1837 checksize(fl, f)
1837 1838
1838 1839 nodes = {nullid: 1}
1839 1840 seen = {}
1840 1841 for i in range(fl.count()):
1841 1842 revisions += 1
1842 1843 n = fl.node(i)
1843 1844
1844 1845 if n in seen:
1845 1846 err(_("%s: duplicate revision %d") % (f, i))
1846 1847 if n not in filenodes[f]:
1847 1848 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1848 1849 else:
1849 1850 del filenodes[f][n]
1850 1851
1851 1852 flr = fl.linkrev(n)
1852 1853 if flr not in filelinkrevs[f]:
1853 1854 err(_("%s:%s points to unexpected changeset %d")
1854 1855 % (f, short(n), flr))
1855 1856 else:
1856 1857 filelinkrevs[f].remove(flr)
1857 1858
1858 1859 # verify contents
1859 1860 try:
1860 1861 t = fl.read(n)
1861 1862 except KeyboardInterrupt:
1862 1863 self.ui.warn(_("interrupted"))
1863 1864 raise
1864 1865 except Exception, inst:
1865 1866 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1866 1867
1867 1868 # verify parents
1868 1869 (p1, p2) = fl.parents(n)
1869 1870 if p1 not in nodes:
1870 1871 err(_("file %s:%s unknown parent 1 %s") %
1871 1872 (f, short(n), short(p1)))
1872 1873 if p2 not in nodes:
1873 1874 err(_("file %s:%s unknown parent 2 %s") %
1874 1875 (f, short(n), short(p1)))
1875 1876 nodes[n] = 1
1876 1877
1877 1878 # cross-check
1878 1879 for node in filenodes[f]:
1879 1880 err(_("node %s in manifests not in %s") % (hex(node), f))
1880 1881
1881 1882 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1882 1883 (files, changesets, revisions))
1883 1884
1884 1885 if errors[0]:
1885 1886 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1886 1887 return 1
1887 1888
# used to avoid circular references so destructors work
def aftertrans(base):
    """Return a callback that renames the journal files under base to
    their undo names after a transaction completes."""
    def renamefiles():
        for src, dst in [("journal", "undo"),
                         ("journal.dirstate", "undo.dirstate")]:
            util.rename(os.path.join(base, src), os.path.join(base, dst))
    return renamefiles
1896 1897
General Comments 0
You need to be logged in to leave comments. Login now