##// END OF EJS Templates
Hooks that run after a transaction finishes must not affect method results.
Vadim Gelfer -
r1717:7a4a16a7 default
parent child Browse files
Show More
@@ -1,1846 +1,1840 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import struct, os, util
9 9 import filelog, manifest, changelog, dirstate, repo
10 10 from node import *
11 11 from i18n import gettext as _
12 12 from demandload import *
13 13 demandload(globals(), "re lock transaction tempfile stat mdiff errno")
14 14
15 15 class localrepository(object):
def __init__(self, ui, path=None, create=0):
    """Open (or create) a local repository.

    ui     - user-interface object for output and configuration access
    path   - repository root; when None, search upward from the current
             directory for one containing ".hg"
    create - when true, create the ".hg" metadata directory

    Raises repo.RepoError when no repository can be found or opened.
    """
    if not path:
        # walk up from the cwd until a directory with ".hg" is found
        p = os.getcwd()
        while not os.path.isdir(os.path.join(p, ".hg")):
            oldp = p
            p = os.path.dirname(p)
            if p == oldp:
                # reached the filesystem root without finding a repo
                raise repo.RepoError(_("no repo found"))
        path = p
    self.path = os.path.join(path, ".hg")

    if not create and not os.path.isdir(self.path):
        raise repo.RepoError(_("repository %s not found") % path)

    self.root = os.path.abspath(path)
    self.ui = ui
    self.opener = util.opener(self.path)    # opens files under .hg
    self.wopener = util.opener(self.root)   # opens working-dir files
    self.manifest = manifest.manifest(self.opener)
    self.changelog = changelog.changelog(self.opener)
    self.tagscache = None       # lazily filled by tags()
    self.nodetagscache = None   # lazily filled by nodetags()
    self.encodepats = None      # lazily filled by wread()
    self.decodepats = None      # lazily filled by wwrite()

    if create:
        os.mkdir(self.path)
        os.mkdir(self.join("data"))

    self.dirstate = dirstate.dirstate(self.opener, ui, self.root)
    try:
        # per-repository configuration is optional
        self.ui.readconfig(self.join("hgrc"))
    except IOError:
        pass
50 50
def hook(self, name, **args):
    """Run every configured hook whose name starts with *name*.

    Keyword arguments are exported into the hook's environment as
    upper-cased variables.  Hooks run with the repository root as the
    current directory.  Returns True when all hooks exited with
    status 0, False otherwise.
    """
    def runhook(name, cmd):
        self.ui.note(_("running hook %s: %s\n") % (name, cmd))
        old = {}
        for k, v in args.items():
            k = k.upper()
            old[k] = os.environ.get(k, None)
            os.environ[k] = v

        # Hooks run in the repository root.  Restore the previous
        # directory and environment even if os.system raises, so a
        # failing hook cannot leave the process in a bad state.
        olddir = os.getcwd()
        os.chdir(self.root)
        try:
            r = os.system(cmd)
        finally:
            os.chdir(olddir)
            for k, v in old.items():
                if v is not None:
                    os.environ[k] = v
                else:
                    del os.environ[k]

        if r:
            self.ui.warn(_("abort: %s hook failed with status %d!\n") %
                         (name, r))
            return False
        return True

    r = True
    # hooks are configured under [hooks] as "<name>" or "<name>.<suffix>"
    for hname, cmd in self.ui.configitems("hooks"):
        s = hname.split(".")
        if s[0] == name and cmd:
            r = runhook(hname, cmd) and r
    return r
84 84
def tags(self):
    '''return a mapping of tag to node'''
    if not self.tagscache:
        self.tagscache = {}

        # record one "node name" tag line; unparsable nodes map to ''
        def addtag(self, k, n):
            try:
                bin_n = bin(n)
            except TypeError:
                bin_n = ''
            self.tagscache[k.strip()] = bin_n

        try:
            # read each head of the tags file, ending with the tip
            # and add each tag found to the map, with "newer" ones
            # taking precedence
            fl = self.file(".hgtags")
            h = fl.heads()
            h.reverse()
            for r in h:
                for l in fl.read(r).splitlines():
                    if l:
                        n, k = l.split(" ", 1)
                        addtag(self, k, n)
        except KeyError:
            # no .hgtags filelog exists yet
            pass

        try:
            # local (uncommitted) tags override committed ones
            f = self.opener("localtags")
            for l in f:
                n, k = l.split(" ", 1)
                addtag(self, k, n)
        except IOError:
            pass

        # 'tip' always points at the most recent revision
        self.tagscache['tip'] = self.changelog.tip()

    return self.tagscache
122 122
def tagslist(self):
    '''return a list of tags ordered by revision'''
    l = []
    for t, n in self.tags().items():
        try:
            r = self.changelog.rev(n)
        except Exception:
            # narrowed from a bare except so KeyboardInterrupt and
            # SystemExit still propagate; an unknown node (e.g. a
            # bogus .hgtags entry) sorts to the beginning of the list
            r = -2
        l.append((r, t, n))
    l.sort()
    return [(t, n) for r, t, n in l]
134 134
def nodetags(self, node):
    '''return the tags associated with a node'''
    if not self.nodetagscache:
        # invert the tag -> node mapping into node -> [tags]
        cache = {}
        for tag, n in self.tags().items():
            cache.setdefault(n, []).append(tag)
        self.nodetagscache = cache
    return self.nodetagscache.get(node, [])
142 142
def lookup(self, key):
    """Resolve *key* (tag name or changelog identifier) to a node.

    Tags take precedence over changelog lookup.  Raises
    repo.RepoError when the key cannot be resolved.
    """
    try:
        return self.tags()[key]
    except KeyError:
        try:
            return self.changelog.lookup(key)
        except Exception:
            # narrowed from a bare except so KeyboardInterrupt and
            # SystemExit are not converted into RepoError
            raise repo.RepoError(_("unknown revision '%s'") % key)
151 151
def dev(self):
    """Return the device number of the repository's .hg directory."""
    st = os.stat(self.path)
    return st.st_dev
154 154
def local(self):
    """This repository is backed by the local filesystem."""
    return True
157 157
def join(self, f):
    """Return the path of *f* inside the .hg metadata directory."""
    return os.path.join(self.path, f)
160 160
def wjoin(self, f):
    """Return the path of *f* inside the working directory."""
    return os.path.join(self.root, f)
163 163
def file(self, f):
    """Return the filelog for tracked file *f*.

    A leading '/' is stripped so repo-absolute and repo-relative
    names address the same filelog.
    """
    # startswith() also copes with an empty name, where the original
    # f[0] indexing would raise IndexError
    if f.startswith('/'):
        f = f[1:]
    return filelog.filelog(self.opener, f)
168 168
def getcwd(self):
    """Return the current directory as seen by the dirstate."""
    return self.dirstate.getcwd()
171 171
def wfile(self, f, mode='r'):
    """Open working-directory file *f* with the given *mode*."""
    return self.wopener(f, mode)
174 174
def wread(self, filename):
    """Read *filename* from the working directory, applying the first
    matching [encode] filter from the configuration.
    """
    if self.encodepats is None:  # 'is None', not '== None' (PEP 8)
        # compile the [encode] patterns once and cache them
        l = []
        for pat, cmd in self.ui.configitems("encode"):
            mf = util.matcher("", "/", [pat], [], [])[1]
            l.append((mf, cmd))
        self.encodepats = l

    data = self.wopener(filename, 'r').read()

    # only the first matching filter is applied
    for mf, cmd in self.encodepats:
        if mf(filename):
            self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
            data = util.filter(data, cmd)
            break

    return data
192 192
def wwrite(self, filename, data, fd=None):
    """Write *data* to working-directory file *filename*, applying the
    first matching [decode] filter; write to *fd* instead when given.
    """
    if self.decodepats is None:  # 'is None', not '== None' (PEP 8)
        # compile the [decode] patterns once and cache them
        l = []
        for pat, cmd in self.ui.configitems("decode"):
            mf = util.matcher("", "/", [pat], [], [])[1]
            l.append((mf, cmd))
        self.decodepats = l

    # only the first matching filter is applied
    for mf, cmd in self.decodepats:
        if mf(filename):
            self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
            data = util.filter(data, cmd)
            break

    if fd:
        return fd.write(data)
    return self.wopener(filename, 'w').write(data)
210 210
def transaction(self):
    """Start a new transaction and return it.

    The current dirstate is saved to journal.dirstate so undo() can
    restore it; when the transaction closes, the journal files are
    renamed to undo / undo.dirstate.
    """
    # save dirstate for undo
    try:
        ds = self.opener("dirstate").read()
    except IOError:
        # no dirstate yet (fresh repository)
        ds = ""
    self.opener("journal.dirstate", "w").write(ds)

    def after():
        # preserve the journal as "undo" data once the transaction
        # completes successfully
        util.rename(self.join("journal"), self.join("undo"))
        util.rename(self.join("journal.dirstate"),
                    self.join("undo.dirstate"))

    return transaction.transaction(self.ui.warn, self.opener,
                                   self.join("journal"), after)
226 226
def recover(self):
    """Roll back an interrupted transaction, if any.

    Returns True when a journal was found and rolled back,
    False otherwise.
    """
    lock = self.lock()
    if os.path.exists(self.join("journal")):
        self.ui.status(_("rolling back interrupted transaction\n"))
        transaction.rollback(self.opener, self.join("journal"))
        # reload the revlogs clobbered by the rollback
        self.manifest = manifest.manifest(self.opener)
        self.changelog = changelog.changelog(self.opener)
        return True
    else:
        self.ui.warn(_("no interrupted transaction available\n"))
        return False
238 238
def undo(self, wlock=None):
    """Roll back the last completed transaction and restore the
    dirstate that was saved alongside it."""
    if not wlock:
        wlock = self.wlock()
    lock = self.lock()
    if os.path.exists(self.join("undo")):
        self.ui.status(_("rolling back last transaction\n"))
        transaction.rollback(self.opener, self.join("undo"))
        util.rename(self.join("undo.dirstate"), self.join("dirstate"))
        self.dirstate.read()
    else:
        self.ui.warn(_("no undo information available\n"))
250 250
def lock(self, wait=1):
    """Acquire the repository (store) lock.

    With *wait* true, warn and block until the current holder
    releases it; otherwise re-raise lock.LockHeld immediately.
    """
    try:
        return lock.lock(self.join("lock"), 0)
    except lock.LockHeld, inst:
        if wait:
            self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
            return lock.lock(self.join("lock"), wait)
        raise inst
259 259
def wlock(self, wait=1):
    """Acquire the working-directory lock.

    The dirstate is written back when the lock is released.  After a
    blocking wait the dirstate is re-read, since the previous holder
    may have changed it.
    """
    try:
        wlock = lock.lock(self.join("wlock"), 0, self.dirstate.write)
    except lock.LockHeld, inst:
        if not wait:
            raise inst
        self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
        wlock = lock.lock(self.join("wlock"), wait, self.dirstate.write)
        # another process held the lock: our cached dirstate is stale
        self.dirstate.read()
    return wlock
270 270
def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
    """Commit *files* with the given metadata without consulting the
    dirstate for the list of changes (used by import/rawcommit-style
    operations).

    p1/p2 default to the current dirstate parents.  The dirstate is
    only updated when p1 matches the current first parent.
    """
    orig_parent = self.dirstate.parents()[0] or nullid
    p1 = p1 or self.dirstate.parents()[0] or nullid
    p2 = p2 or self.dirstate.parents()[1] or nullid
    c1 = self.changelog.read(p1)
    c2 = self.changelog.read(p2)
    m1 = self.manifest.read(c1[0])
    mf1 = self.manifest.readflags(c1[0])
    m2 = self.manifest.read(c2[0])
    changed = []

    # only touch the dirstate when committing on top of the current
    # working-directory parent
    if orig_parent == p1:
        update_dirstate = 1
    else:
        update_dirstate = 0

    if not wlock:
        wlock = self.wlock()
    lock = self.lock()
    tr = self.transaction()
    mm = m1.copy()
    mfm = mf1.copy()
    linkrev = self.changelog.count()
    for f in files:
        try:
            t = self.wread(f)
            tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
            r = self.file(f)
            mfm[f] = tm

            fp1 = m1.get(f, nullid)
            fp2 = m2.get(f, nullid)

            # is the same revision on two branches of a merge?
            if fp2 == fp1:
                fp2 = nullid

            if fp2 != nullid:
                # is one parent an ancestor of the other?
                fpa = r.ancestor(fp1, fp2)
                if fpa == fp1:
                    fp1, fp2 = fp2, nullid
                elif fpa == fp2:
                    fp2 = nullid

            # is the file unmodified from the parent?
            if t == r.read(fp1):
                # record the proper existing parent in manifest
                # no need to add a revision
                mm[f] = fp1
                continue

            mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
            changed.append(f)
            if update_dirstate:
                self.dirstate.update([f], "n")
        except IOError:
            # the file is gone from the working directory: drop it
            try:
                del mm[f]
                del mfm[f]
                if update_dirstate:
                    self.dirstate.forget([f])
            except:
                # deleted from p2?
                pass

    mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
    user = user or self.ui.username()
    n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
    tr.close()
    if update_dirstate:
        self.dirstate.setparents(n, nullid)
343 343
def commit(self, files=None, text="", user=None, date=None,
           match=util.always, force=False, wlock=None):
    """Commit changes to the repository.

    files - explicit list of files to commit; when empty, the
            dirstate is consulted via changes()
    text  - commit message; when empty, the user is prompted in an
            editor run from the repository root

    Returns the new changeset node, or None when nothing was
    committed (no changes, precommit hook failed, or the commit
    message was left empty).
    """
    commit = []
    remove = []
    changed = []

    if files:
        for f in files:
            s = self.dirstate.state(f)
            if s in 'nmai':
                commit.append(f)
            elif s == 'r':
                remove.append(f)
            else:
                self.ui.warn(_("%s not tracked!\n") % f)
    else:
        modified, added, removed, deleted, unknown = self.changes(match=match)
        commit = modified + added
        remove = removed

    p1, p2 = self.dirstate.parents()
    c1 = self.changelog.read(p1)
    c2 = self.changelog.read(p2)
    m1 = self.manifest.read(c1[0])
    mf1 = self.manifest.readflags(c1[0])
    m2 = self.manifest.read(c2[0])

    # an empty commit is only allowed with --force or during a merge
    if not commit and not remove and not force and p2 == nullid:
        self.ui.status(_("nothing changed\n"))
        return None

    # the precommit hook can veto the commit
    if not self.hook("precommit"):
        return None

    if not wlock:
        wlock = self.wlock()
    lock = self.lock()
    tr = self.transaction()

    # check in files
    new = {}
    linkrev = self.changelog.count()
    commit.sort()
    for f in commit:
        self.ui.note(f + "\n")
        try:
            mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
            t = self.wread(f)
        except IOError:
            self.ui.warn(_("trouble committing %s!\n") % f)
            raise

        r = self.file(f)

        meta = {}
        cp = self.dirstate.copied(f)
        if cp:
            # record the copy source and revision in the file metadata
            meta["copy"] = cp
            meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
            self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
            fp1, fp2 = nullid, nullid
        else:
            fp1 = m1.get(f, nullid)
            fp2 = m2.get(f, nullid)

        if fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = r.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent?
        if not meta and t == r.read(fp1) and fp2 == nullid:
            # record the proper existing parent in manifest
            # no need to add a revision
            new[f] = fp1
            continue

        new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
        # remember what we've added so that we can later calculate
        # the files to pull from a set of changesets
        changed.append(f)

    # update manifest
    m1 = m1.copy()
    m1.update(new)
    for f in remove:
        if f in m1:
            del m1[f]
    mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                           (new, remove))

    # add changeset
    new = new.keys()
    new.sort()

    if not text:
        edittext = [""]
        if p2 != nullid:
            edittext.append("HG: branch merge")
        edittext.extend(["HG: changed %s" % f for f in changed])
        edittext.extend(["HG: removed %s" % f for f in remove])
        if not changed and not remove:
            edittext.append("HG: no files changed")
        edittext.append("")
        # run editor in the repository root
        olddir = os.getcwd()
        os.chdir(self.root)
        edittext = self.ui.edit("\n".join(edittext))
        os.chdir(olddir)
        if not edittext.rstrip():
            return None
        text = edittext

    user = user or self.ui.username()
    n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
    tr.close()

    self.dirstate.setparents(n)
    self.dirstate.update(new, "n")
    self.dirstate.forget(remove)

    # the commit hook runs after the transaction closed; per this
    # changeset's commit message, its outcome must not affect the
    # method's result
    self.hook("commit", node=hex(n))
    return n
471 470
def walk(self, node=None, files=[], match=util.always):
    """Yield (source, filename) pairs for files accepted by *match*.

    With *node*, walk the manifest of that revision (source 'm') and
    warn about requested files missing there; otherwise walk the
    dirstate / working directory.
    """
    if node:
        fdict = dict.fromkeys(files)
        for fn in self.manifest.read(self.changelog.read(node)[0]):
            # any file found in the manifest is not "missing"
            fdict.pop(fn, None)
            if match(fn):
                yield 'm', fn
        for fn in fdict:
            self.ui.warn(_('%s: No such file in rev %s\n') % (
                util.pathto(self.getcwd(), fn), short(node)))
    else:
        for src, fn in self.dirstate.walk(files, match):
            yield src, fn
485 484
def changes(self, node1=None, node2=None, files=[], match=util.always,
            wlock=None):
    """return changes between two nodes or node and working directory

    If node1 is None, use the first dirstate parent instead.
    If node2 is None, compare node1 with working directory.

    Returns a tuple (modified, added, removed, deleted, unknown) of
    sorted file-name lists.
    """

    def fcmp(fn, mf):
        # compare working-dir contents with the manifest revision
        t1 = self.wread(fn)
        t2 = self.file(fn).read(mf.get(fn, nullid))
        return cmp(t1, t2)

    def mfmatches(node):
        # manifest of *node*, restricted to files accepted by match
        change = self.changelog.read(node)
        mf = dict(self.manifest.read(change[0]))
        for fn in mf.keys():
            if not match(fn):
                del mf[fn]
        return mf

    # are we comparing the working directory?
    if not node2:
        if not wlock:
            try:
                wlock = self.wlock(wait=0)
            except lock.LockHeld:
                # can't update the dirstate; proceed read-only
                wlock = None
        lookup, modified, added, removed, deleted, unknown = (
            self.dirstate.changes(files, match))

        # are we comparing working dir against its parent?
        if not node1:
            if lookup:
                # do a full compare of any files that might have changed
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup:
                    if fcmp(f, mf2):
                        modified.append(f)
                    elif wlock is not None:
                        # file is unchanged: record that in the dirstate
                        self.dirstate.update([f], "n")
        else:
            # we are comparing working dir against non-parent
            # generate a pseudo-manifest for the working dir
            mf2 = mfmatches(self.dirstate.parents()[0])
            for f in lookup + modified + added:
                mf2[f] = ""
            for f in removed:
                if f in mf2:
                    del mf2[f]
    else:
        # we are comparing two revisions
        deleted, unknown = [], []
        mf2 = mfmatches(node2)

    if node1:
        # flush lists from dirstate before comparing manifests
        modified, added = [], []

        mf1 = mfmatches(node1)

        for fn in mf2:
            if mf1.has_key(fn):
                if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
                    modified.append(fn)
                del mf1[fn]
            else:
                added.append(fn)

        # whatever is left in mf1 exists only in node1
        removed = mf1.keys()

    # sort and return results:
    for l in modified, added, removed, deleted, unknown:
        l.sort()
    return (modified, added, removed, deleted, unknown)
561 560
def add(self, list, wlock=None):
    """Schedule the given files for addition at the next commit."""
    if not wlock:
        wlock = self.wlock()
    for f in list:
        p = self.wjoin(f)
        # guard clauses: warn and skip anything we cannot add
        if not os.path.exists(p):
            self.ui.warn(_("%s does not exist!\n") % f)
            continue
        if not os.path.isfile(p):
            self.ui.warn(_("%s not added: only files supported currently\n")
                         % f)
            continue
        if self.dirstate.state(f) in 'an':
            self.ui.warn(_("%s already tracked!\n") % f)
            continue
        self.dirstate.update([f], "a")
576 575
def forget(self, list, wlock=None):
    """Undo a pending add for each of the given files."""
    if not wlock:
        wlock = self.wlock()
    for f in list:
        state = self.dirstate.state(f)
        if state in 'ai':
            self.dirstate.forget([f])
        else:
            self.ui.warn(_("%s not added!\n") % f)
585 584
def remove(self, list, unlink=False, wlock=None):
    """Schedule files for removal; with *unlink*, also delete them
    from the working directory."""
    if unlink:
        for f in list:
            try:
                util.unlink(self.wjoin(f))
            except OSError, inst:
                # a file that is already gone is fine
                if inst.errno != errno.ENOENT:
                    raise
    if not wlock:
        wlock = self.wlock()
    for f in list:
        p = self.wjoin(f)
        if os.path.exists(p):
            self.ui.warn(_("%s still exists!\n") % f)
        elif self.dirstate.state(f) == 'a':
            # an added-but-never-committed file is simply forgotten
            self.ui.warn(_("%s never committed!\n") % f)
            self.dirstate.forget([f])
        elif f not in self.dirstate:
            self.ui.warn(_("%s not tracked!\n") % f)
        else:
            self.dirstate.update([f], "r")
607 606
def undelete(self, list, wlock=None):
    """Restore files scheduled for removal from the first dirstate
    parent, including their exec flag."""
    p = self.dirstate.parents()[0]
    mn = self.changelog.read(p)[0]
    mf = self.manifest.readflags(mn)
    m = self.manifest.read(mn)
    if not wlock:
        wlock = self.wlock()
    for f in list:
        if self.dirstate.state(f) not in "r":
            self.ui.warn("%s not removed!\n" % f)
        else:
            t = self.file(f).read(m[f])
            self.wwrite(f, t)
            util.set_exec(self.wjoin(f), mf[f])
            self.dirstate.update([f], "n")
623 622
def copy(self, source, dest, wlock=None):
    """Record *dest* as a copy of *source* in the dirstate."""
    p = self.wjoin(dest)
    # guard clauses: the destination must be an existing regular file
    if not os.path.exists(p):
        self.ui.warn(_("%s does not exist!\n") % dest)
        return
    if not os.path.isfile(p):
        self.ui.warn(_("copy failed: %s is not a file\n") % dest)
        return
    if not wlock:
        wlock = self.wlock()
    if self.dirstate.state(dest) == '?':
        self.dirstate.update([dest], "a")
    self.dirstate.copy(source, dest)
636 635
def heads(self, start=None):
    """Return the repository heads, highest revision first."""
    decorated = [(-self.changelog.rev(h), h)
                 for h in self.changelog.heads(start)]
    decorated.sort()
    return [node for negrev, node in decorated]
643 642
644 643 # branchlookup returns a dict giving a list of branches for
645 644 # each head. A branch is defined as the tag of a node or
646 645 # the branch of the node's parents. If a node has multiple
647 646 # branch tags, tags are eliminated if they are visible from other
648 647 # branch tags.
649 648 #
650 649 # So, for this graph: a->b->c->d->e
651 650 # \ /
652 651 # aa -----/
653 652 # a has tag 2.6.12
654 653 # d has tag 2.6.13
655 654 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
656 655 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
657 656 # from the list.
658 657 #
659 658 # It is possible that more than one head will have the same branch tag.
660 659 # callers need to check the result for multiple heads under the same
661 660 # branch tag if that is a problem for them (ie checkout of a specific
662 661 # branch).
663 662 #
664 663 # passing in a specific branch will limit the depth of the search
665 664 # through the parents. It won't limit the branches returned in the
666 665 # result though.
def branchlookup(self, heads=None, branch=None):
    """Return a dict mapping each head to its list of branch tags.

    See the block comment above for the algorithm, its caveats, and
    the meaning of *branch* (limits the search depth).
    """
    if not heads:
        heads = self.heads()
    headt = [ h for h in heads ]
    chlog = self.changelog
    branches = {}
    merges = []
    seenmerge = {}

    # traverse the tree once for each head, recording in the branches
    # dict which tags are visible from this head. The branches
    # dict also records which tags are visible from each tag
    # while we traverse.
    while headt or merges:
        if merges:
            # resume a deferred second-parent traversal
            n, found = merges.pop()
            visit = [n]
        else:
            h = headt.pop()
            visit = [h]
            found = [h]
            seen = {}
        while visit:
            n = visit.pop()
            if n in seen:
                continue
            pp = chlog.parents(n)
            tags = self.nodetags(n)
            if tags:
                for x in tags:
                    if x == 'tip':
                        continue
                    # this tag is visible from every node found so far
                    for f in found:
                        branches.setdefault(f, {})[n] = 1
                    branches.setdefault(n, {})[n] = 1
                    break
                if n not in found:
                    found.append(n)
                if branch in tags:
                    # reached the requested branch: stop descending
                    continue
            seen[n] = 1
            if pp[1] != nullid and n not in seenmerge:
                # defer the second parent of a merge for a later pass
                merges.append((pp[1], [x for x in found]))
                seenmerge[n] = 1
            if pp[0] != nullid:
                visit.append(pp[0])
    # traverse the branches dict, eliminating branch tags from each
    # head that are visible from another branch tag for that head.
    out = {}
    viscache = {}
    for h in heads:
        def visible(node):
            # memoized set of branch-tag nodes reachable from *node*
            if node in viscache:
                return viscache[node]
            ret = {}
            visit = [node]
            while visit:
                x = visit.pop()
                if x in viscache:
                    ret.update(viscache[x])
                elif x not in ret:
                    ret[x] = 1
                    if x in branches:
                        visit[len(visit):] = branches[x].keys()
            viscache[node] = ret
            return ret
        if h not in branches:
            continue
        # O(n^2), but somewhat limited. This only searches the
        # tags visible from a specific head, not all the tags in the
        # whole repo.
        for b in branches[h]:
            vis = False
            for bb in branches[h].keys():
                if b != bb:
                    if b in visible(bb):
                        vis = True
                        break
            if not vis:
                l = out.setdefault(h, [])
                l[len(l):] = self.nodetags(b)
    return out
749 748
def branches(self, nodes):
    """For each node, walk first parents back to a branch point and
    return (tipmost, branch-root, parent1, parent2) tuples."""
    if not nodes:
        nodes = [self.changelog.tip()]
    result = []
    for start in nodes:
        n = start
        while n:
            p1, p2 = self.changelog.parents(n)
            # stop at a merge or at the root of history
            if p2 != nullid or p1 == nullid:
                result.append((start, n, p1, p2))
                break
            n = p1
    return result
763 762
def between(self, pairs):
    """For each (top, bottom) pair, return sample nodes along the
    first-parent chain at exponentially growing distances from top."""
    result = []
    for top, bottom in pairs:
        samples = []
        n = top
        step = 1   # next sampling distance (1, 2, 4, 8, ...)
        dist = 0   # current distance from top
        while n != bottom:
            parent = self.changelog.parents(n)[0]
            if dist == step:
                samples.append(n)
                step = step * 2
            n = parent
            dist += 1
        result.append(samples)
    return result
782 781
def findincoming(self, remote, base=None, heads=None):
    """Return the roots of changesets the remote has and we don't,
    or None when there is nothing to fetch.

    *base*, when given, is filled in with nodes known on both sides;
    *heads* defaults to the remote's heads.
    """
    m = self.changelog.nodemap
    search = []
    fetch = {}
    seen = {}
    seenbranch = {}
    if base == None:
        base = {}

    # assume we're closer to the tip than the root
    # and start by examining the heads
    self.ui.status(_("searching for changes\n"))

    if not heads:
        heads = remote.heads()

    unknown = []
    for h in heads:
        if h not in m:
            unknown.append(h)
        else:
            base[h] = 1

    if not unknown:
        # every remote head is already known locally
        return None

    rep = {}
    reqcnt = 0

    # search through remote branches
    # a 'branch' here is a linear segment of history, with four parts:
    # head, root, first parent, second parent
    # (a branch always has two parents (or none) by definition)
    unknown = remote.branches(unknown)
    while unknown:
        r = []
        while unknown:
            n = unknown.pop(0)
            if n[0] in seen:
                continue

            self.ui.debug(_("examining %s:%s\n")
                          % (short(n[0]), short(n[1])))
            if n[0] == nullid:
                break
            if n in seenbranch:
                self.ui.debug(_("branch already found\n"))
                continue
            if n[1] and n[1] in m: # do we know the base?
                self.ui.debug(_("found incomplete branch %s:%s\n")
                              % (short(n[0]), short(n[1])))
                search.append(n) # schedule branch range for scanning
                seenbranch[n] = 1
            else:
                if n[1] not in seen and n[1] not in fetch:
                    if n[2] in m and n[3] in m:
                        self.ui.debug(_("found new changeset %s\n") %
                                      short(n[1]))
                        fetch[n[1]] = 1 # earliest unknown
                        base[n[2]] = 1 # latest known
                        continue

                # ask the remote about parents we haven't seen yet
                for a in n[2:4]:
                    if a not in rep:
                        r.append(a)
                        rep[a] = 1

            seen[n[0]] = 1

        if r:
            reqcnt += 1
            self.ui.debug(_("request %d: %s\n") %
                          (reqcnt, " ".join(map(short, r))))
            # query in batches of ten to bound request size
            for p in range(0, len(r), 10):
                for b in remote.branches(r[p:p+10]):
                    self.ui.debug(_("received %s:%s\n") %
                                  (short(b[0]), short(b[1])))
                    if b[0] in m:
                        self.ui.debug(_("found base node %s\n")
                                      % short(b[0]))
                        base[b[0]] = 1
                    elif b[0] not in seen:
                        unknown.append(b)

    # do binary search on the branches we found
    while search:
        n = search.pop(0)
        reqcnt += 1
        l = remote.between([(n[0], n[1])])[0]
        l.append(n[1])
        p = n[0]
        f = 1
        for i in l:
            self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
            if i in m:
                if f <= 2:
                    self.ui.debug(_("found new branch changeset %s\n") %
                                  short(p))
                    fetch[p] = 1
                    base[i] = 1
                else:
                    self.ui.debug(_("narrowed branch search to %s:%s\n")
                                  % (short(p), short(i)))
                    search.append((p, i))
                break
            p, f = i, f * 2

    # sanity check our fetch list
    for f in fetch.keys():
        if f in m:
            raise repo.RepoError(_("already have changeset ") + short(f[:4]))

    if base.keys() == [nullid]:
        self.ui.warn(_("warning: pulling from an unrelated repository!\n"))

    self.ui.note(_("found new changesets starting at ") +
                 " ".join([short(f) for f in fetch]) + "\n")

    self.ui.debug(_("%d total queries\n") % reqcnt)

    return fetch.keys()
904 903
def findoutgoing(self, remote, base=None, heads=None):
    """Return the roots of changesets we have and the remote doesn't.

    When *base* is not supplied, run findincoming first to discover
    the common nodes.
    """
    if base == None:
        base = {}
        self.findincoming(remote, base, heads)

    self.ui.debug(_("common changesets up to ")
                  + " ".join(map(short, base.keys())) + "\n")

    remain = dict.fromkeys(self.changelog.nodemap)

    # prune everything remote has from the tree
    del remain[nullid]
    remove = base.keys()
    while remove:
        n = remove.pop(0)
        if n in remain:
            del remain[n]
            for p in self.changelog.parents(n):
                remove.append(p)

    # find every node whose parents have been pruned
    subset = []
    for n in remain:
        p1, p2 = self.changelog.parents(n)
        if p1 not in remain and p2 not in remain:
            subset.append(n)

    # this is the set of all roots we have to push
    return subset
934 933
def pull(self, remote, heads=None):
    """Pull changes from *remote*, optionally limited to *heads*.

    Returns 1 when there was nothing to pull, otherwise the result
    of addchangegroup().
    """
    lock = self.lock()

    # if we have an empty repo, fetch everything
    if self.changelog.tip() == nullid:
        self.ui.status(_("requesting all changes\n"))
        fetch = [nullid]
    else:
        fetch = self.findincoming(remote)

    if not fetch:
        self.ui.status(_("no changes found\n"))
        return 1

    if heads is None:
        cg = remote.changegroup(fetch)
    else:
        cg = remote.changegroupsubset(fetch, heads)
    return self.addchangegroup(cg)
954 953
def push(self, remote, force=False):
    """Push local changes to *remote*.

    Without *force*, refuse when the remote has unsynced changes or
    when the push would create new remote branch heads.  Returns 1 on
    refusal or when there is nothing to push.
    """
    lock = remote.lock()

    base = {}
    heads = remote.heads()
    inc = self.findincoming(remote, base, heads)
    if not force and inc:
        self.ui.warn(_("abort: unsynced remote changes!\n"))
        self.ui.status(_("(did you forget to sync? use push -f to force)\n"))
        return 1

    update = self.findoutgoing(remote, base)
    if not update:
        self.ui.status(_("no changes found\n"))
        return 1
    elif not force:
        # more local heads than remote heads means a new remote branch
        if len(heads) < len(self.changelog.heads()):
            self.ui.warn(_("abort: push creates new remote branches!\n"))
            self.ui.status(_("(did you forget to merge?"
                             " use push -f to force)\n"))
            return 1

    cg = self.changegroup(update)
    return remote.addchangegroup(cg)
979 978
    def changegroupsubset(self, bases, heads):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        bases and heads are lists of changelog node ids.  Returns a
        util.chunkbuffer wrapping the generated stream of chunks."""

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming the a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we now the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield struct.pack(">l", len(fname) + 4) + fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield struct.pack(">l", 0)

        return util.chunkbuffer(gengroup())
1245 1244
1246 1245 def changegroup(self, basenodes):
1247 1246 """Generate a changegroup of all nodes that we have that a recipient
1248 1247 doesn't.
1249 1248
1250 1249 This is much easier than the previous function as we can assume that
1251 1250 the recipient has any changenode we aren't sending them."""
1252 1251 cl = self.changelog
1253 1252 nodes = cl.nodesbetween(basenodes, None)[0]
1254 1253 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1255 1254
1256 1255 def identity(x):
1257 1256 return x
1258 1257
1259 1258 def gennodelst(revlog):
1260 1259 for r in xrange(0, revlog.count()):
1261 1260 n = revlog.node(r)
1262 1261 if revlog.linkrev(n) in revset:
1263 1262 yield n
1264 1263
1265 1264 def changed_file_collector(changedfileset):
1266 1265 def collect_changed_files(clnode):
1267 1266 c = cl.read(clnode)
1268 1267 for fname in c[3]:
1269 1268 changedfileset[fname] = 1
1270 1269 return collect_changed_files
1271 1270
1272 1271 def lookuprevlink_func(revlog):
1273 1272 def lookuprevlink(n):
1274 1273 return cl.node(revlog.linkrev(n))
1275 1274 return lookuprevlink
1276 1275
1277 1276 def gengroup():
1278 1277 # construct a list of all changed files
1279 1278 changedfiles = {}
1280 1279
1281 1280 for chnk in cl.group(nodes, identity,
1282 1281 changed_file_collector(changedfiles)):
1283 1282 yield chnk
1284 1283 changedfiles = changedfiles.keys()
1285 1284 changedfiles.sort()
1286 1285
1287 1286 mnfst = self.manifest
1288 1287 nodeiter = gennodelst(mnfst)
1289 1288 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1290 1289 yield chnk
1291 1290
1292 1291 for fname in changedfiles:
1293 1292 filerevlog = self.file(fname)
1294 1293 nodeiter = gennodelst(filerevlog)
1295 1294 nodeiter = list(nodeiter)
1296 1295 if nodeiter:
1297 1296 yield struct.pack(">l", len(fname) + 4) + fname
1298 1297 lookup = lookuprevlink_func(filerevlog)
1299 1298 for chnk in filerevlog.group(nodeiter, lookup):
1300 1299 yield chnk
1301 1300
1302 1301 yield struct.pack(">l", 0)
1303 1302
1304 1303 return util.chunkbuffer(gengroup())
1305 1304
1306 1305 def addchangegroup(self, source):
1307 1306
1308 1307 def getchunk():
1309 1308 d = source.read(4)
1310 1309 if not d:
1311 1310 return ""
1312 1311 l = struct.unpack(">l", d)[0]
1313 1312 if l <= 4:
1314 1313 return ""
1315 1314 d = source.read(l - 4)
1316 1315 if len(d) < l - 4:
1317 1316 raise repo.RepoError(_("premature EOF reading chunk"
1318 1317 " (got %d bytes, expected %d)")
1319 1318 % (len(d), l - 4))
1320 1319 return d
1321 1320
1322 1321 def getgroup():
1323 1322 while 1:
1324 1323 c = getchunk()
1325 1324 if not c:
1326 1325 break
1327 1326 yield c
1328 1327
1329 1328 def csmap(x):
1330 1329 self.ui.debug(_("add changeset %s\n") % short(x))
1331 1330 return self.changelog.count()
1332 1331
1333 1332 def revmap(x):
1334 1333 return self.changelog.rev(x)
1335 1334
1336 1335 if not source:
1337 1336 return
1338 1337 changesets = files = revisions = 0
1339 1338
1340 1339 tr = self.transaction()
1341 1340
1342 1341 oldheads = len(self.changelog.heads())
1343 1342
1344 1343 # pull off the changeset group
1345 1344 self.ui.status(_("adding changesets\n"))
1346 1345 co = self.changelog.tip()
1347 1346 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
1348 1347 cnr, cor = map(self.changelog.rev, (cn, co))
1349 1348 if cn == nullid:
1350 1349 cnr = cor
1351 1350 changesets = cnr - cor
1352 1351
1353 1352 # pull off the manifest group
1354 1353 self.ui.status(_("adding manifests\n"))
1355 1354 mm = self.manifest.tip()
1356 1355 mo = self.manifest.addgroup(getgroup(), revmap, tr)
1357 1356
1358 1357 # process the files
1359 1358 self.ui.status(_("adding file changes\n"))
1360 1359 while 1:
1361 1360 f = getchunk()
1362 1361 if not f:
1363 1362 break
1364 1363 self.ui.debug(_("adding %s revisions\n") % f)
1365 1364 fl = self.file(f)
1366 1365 o = fl.count()
1367 1366 n = fl.addgroup(getgroup(), revmap, tr)
1368 1367 revisions += fl.count() - o
1369 1368 files += 1
1370 1369
1371 1370 newheads = len(self.changelog.heads())
1372 1371 heads = ""
1373 1372 if oldheads and newheads > oldheads:
1374 1373 heads = _(" (+%d heads)") % (newheads - oldheads)
1375 1374
1376 1375 self.ui.status(_("added %d changesets"
1377 1376 " with %d changes to %d files%s\n")
1378 1377 % (changesets, revisions, files, heads))
1379 1378
1380 1379 tr.close()
1381 1380
1382 1381 if changesets > 0:
1383 if not self.hook("changegroup",
1384 node=hex(self.changelog.node(cor+1))):
1385 self.ui.warn(_("abort: changegroup hook returned failure!\n"))
1386 return 1
1382 self.hook("changegroup", node=hex(self.changelog.node(cor+1)))
1387 1383
1388 1384 for i in range(cor + 1, cnr + 1):
1389 1385 self.hook("incoming", node=hex(self.changelog.node(i)))
1390 1386
1391 return
1392
    def update(self, node, allow=False, force=False, choose=None,
               moddirstate=True, forcemerge=False, wlock=None):
        """Update the working directory to changeset `node`, merging if
        needed.

        allow      -- permit a cross-branch merge (hg update -m)
        force      -- clobber local changes / jump regardless of state
        choose     -- optional predicate limiting which files are touched
        moddirstate -- if False, leave the dirstate untouched (dry run of
                       the working-dir changes)
        forcemerge -- merge even with outstanding changes
        wlock      -- working-dir lock; acquired here if not supplied

        Returns 1 when aborting (uncommitted merge outstanding, or a
        branch-spanning update without `allow`), otherwise the `err` flag:
        True if any 3-way file merge failed, False on full success.
        """
        pl = self.dirstate.parents()
        if not force and pl[1] != nullid:
            self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
            return 1

        err = False

        # p1: first working-dir parent, p2: target changeset,
        # pa: their common ancestor; m*/mf* are the corresponding
        # manifests and exec-flag maps
        p1, p2 = pl[0], node
        pa = self.changelog.ancestor(p1, p2)
        m1n = self.changelog.read(p1)[0]
        m2n = self.changelog.read(p2)[0]
        man = self.manifest.ancestor(m1n, m2n)
        m1 = self.manifest.read(m1n)
        mf1 = self.manifest.readflags(m1n)
        m2 = self.manifest.read(m2n).copy()
        mf2 = self.manifest.readflags(m2n)
        ma = self.manifest.read(man)
        mfa = self.manifest.readflags(man)

        modified, added, removed, deleted, unknown = self.changes()

        # is this a jump, or a merge?  i.e. is there a linear path
        # from p1 to p2?
        linear_path = (pa == p1 or pa == p2)

        if allow and linear_path:
            raise util.Abort(_("there is nothing to merge, "
                               "just use 'hg update'"))
        if allow and not forcemerge:
            if modified or added or removed:
                raise util.Abort(_("outstanding uncommited changes"))
        if not forcemerge and not force:
            for f in unknown:
                if f in m2:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) != 0:
                        raise util.Abort(_("'%s' already exists in the working"
                                           " dir and differs from remote") % f)

        # resolve the manifests to determine which files
        # we care about merging
        self.ui.note(_("resolving manifests\n"))
        self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
                      (force, allow, moddirstate, linear_path))
        self.ui.debug(_(" ancestor %s local %s remote %s\n") %
                      (short(man), short(m1n), short(m2n)))

        # actions computed below: merge (3-way merge), get (copy from
        # target), remove (delete from working dir)
        merge = {}
        get = {}
        remove = []

        # construct a working dir manifest
        mw = m1.copy()
        mfw = mf1.copy()
        umap = dict.fromkeys(unknown)

        for f in added + modified + unknown:
            mw[f] = ""
            mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

        if moddirstate and not wlock:
            wlock = self.wlock()

        for f in deleted + removed:
            if f in mw:
                del mw[f]

            # If we're jumping between revisions (as opposed to merging),
            # and if neither the working directory nor the target rev has
            # the file, then we need to remove it from the dirstate, to
            # prevent the dirstate from listing the file when it is no
            # longer in the manifest.
            if moddirstate and linear_path and f not in m2:
                self.dirstate.forget((f,))

        # Compare manifests
        for f, n in mw.iteritems():
            if choose and not choose(f):
                continue
            if f in m2:
                s = 0

                # is the wfile new since m1, and match m2?
                if f not in m1:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) == 0:
                        n = m2[f]
                    del t1, t2

                # are files different?
                if n != m2[f]:
                    a = ma.get(f, nullid)
                    # are both different from the ancestor?
                    if n != a and m2[f] != a:
                        self.ui.debug(_(" %s versions differ, resolve\n") % f)
                        # merge executable bits
                        # "if we changed or they changed, change in merge"
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        merge[f] = (m1.get(f, nullid), m2[f], mode)
                        s = 1
                    # are we clobbering?
                    # is remote's version newer?
                    # or are we going back in time?
                    elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                        self.ui.debug(_(" remote %s is newer, get\n") % f)
                        get[f] = m2[f]
                        s = 1
                elif f in umap:
                    # this unknown file is the same as the checkout
                    get[f] = m2[f]

                if not s and mfw[f] != mf2[f]:
                    if force:
                        self.ui.debug(_(" updating permissions for %s\n") % f)
                        util.set_exec(self.wjoin(f), mf2[f])
                    else:
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        if mode != b:
                            self.ui.debug(_(" updating permissions for %s\n")
                                          % f)
                            util.set_exec(self.wjoin(f), mode)
                del m2[f]
            elif f in ma:
                if n != ma[f]:
                    r = _("d")
                    if not force and (linear_path or allow):
                        r = self.ui.prompt(
                            (_(" local changed %s which remote deleted\n") % f) +
                            _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                    if r == _("d"):
                        remove.append(f)
                else:
                    self.ui.debug(_("other deleted %s\n") % f)
                    remove.append(f) # other deleted it
            else:
                # file is created on branch or in working directory
                if force and f not in umap:
                    self.ui.debug(_("remote deleted %s, clobbering\n") % f)
                    remove.append(f)
                elif n == m1.get(f, nullid): # same as parent
                    if p2 == pa: # going backwards?
                        self.ui.debug(_("remote deleted %s\n") % f)
                        remove.append(f)
                    else:
                        self.ui.debug(_("local modified %s, keeping\n") % f)
                else:
                    self.ui.debug(_("working dir created %s, keeping\n") % f)

        # files present only in the target manifest
        for f, n in m2.iteritems():
            if choose and not choose(f):
                continue
            if f[0] == "/":
                continue
            if f in ma and n != ma[f]:
                r = _("k")
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        (_("remote changed %s which local deleted\n") % f) +
                        _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                if r == _("k"):
                    get[f] = n
            elif f not in ma:
                self.ui.debug(_("remote created %s\n") % f)
                get[f] = n
            else:
                if force or p2 == pa: # going backwards?
                    self.ui.debug(_("local deleted %s, recreating\n") % f)
                    get[f] = n
                else:
                    self.ui.debug(_("local deleted %s\n") % f)

        del mw, m1, m2, ma

        if force:
            for f in merge:
                get[f] = merge[f][1]
            merge = {}

        if linear_path or force:
            # we don't need to do any magic, just jump to the new rev
            branch_merge = False
            p1, p2 = p2, nullid
        else:
            if not allow:
                self.ui.status(_("this update spans a branch"
                                 " affecting the following files:\n"))
                fl = merge.keys() + get.keys()
                fl.sort()
                for f in fl:
                    cf = ""
                    if f in merge:
                        cf = _(" (resolve)")
                    self.ui.status(" %s%s\n" % (f, cf))
                self.ui.warn(_("aborting update spanning branches!\n"))
                self.ui.status(_("(use update -m to merge across branches"
                                 " or -C to lose changes)\n"))
                return 1
            branch_merge = True

        # get the files we don't need to change
        files = get.keys()
        files.sort()
        for f in files:
            if f[0] == "/":
                continue
            self.ui.note(_("getting %s\n") % f)
            t = self.file(f).read(get[f])
            self.wwrite(f, t)
            util.set_exec(self.wjoin(f), mf2[f])
            if moddirstate:
                if branch_merge:
                    self.dirstate.update([f], 'n', st_mtime=-1)
                else:
                    self.dirstate.update([f], 'n')

        # merge the tricky bits
        files = merge.keys()
        files.sort()
        for f in files:
            self.ui.status(_("merging %s\n") % f)
            my, other, flag = merge[f]
            ret = self.merge3(f, my, other)
            if ret:
                err = True
            util.set_exec(self.wjoin(f), flag)
            if moddirstate:
                if branch_merge:
                    # We've done a branch merge, mark this file as merged
                    # so that we properly record the merger later
                    self.dirstate.update([f], 'm')
                else:
                    # We've update-merged a locally modified file, so
                    # we set the dirstate to emulate a normal checkout
                    # of that file some time in the past. Thus our
                    # merge will appear as a normal local file
                    # modification.
                    f_len = len(self.file(f).read(other))
                    self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)

        remove.sort()
        for f in remove:
            self.ui.note(_("removing %s\n") % f)
            try:
                util.unlink(self.wjoin(f))
            except OSError, inst:
                # missing file is fine; report any other removal failure
                if inst.errno != errno.ENOENT:
                    self.ui.warn(_("update failed to remove %s: %s!\n") %
                                 (f, inst.strerror))
        if moddirstate:
            if branch_merge:
                self.dirstate.update(remove, 'r')
            else:
                self.dirstate.forget(remove)

        if moddirstate:
            self.dirstate.setparents(p1, p2)
        return err
1656 1650
1657 1651 def merge3(self, fn, my, other):
1658 1652 """perform a 3-way merge in the working directory"""
1659 1653
1660 1654 def temp(prefix, node):
1661 1655 pre = "%s~%s." % (os.path.basename(fn), prefix)
1662 1656 (fd, name) = tempfile.mkstemp("", pre)
1663 1657 f = os.fdopen(fd, "wb")
1664 1658 self.wwrite(fn, fl.read(node), f)
1665 1659 f.close()
1666 1660 return name
1667 1661
1668 1662 fl = self.file(fn)
1669 1663 base = fl.ancestor(my, other)
1670 1664 a = self.wjoin(fn)
1671 1665 b = temp("base", base)
1672 1666 c = temp("other", other)
1673 1667
1674 1668 self.ui.note(_("resolving %s\n") % fn)
1675 1669 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1676 1670 (fn, short(my), short(other), short(base)))
1677 1671
1678 1672 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1679 1673 or "hgmerge")
1680 1674 r = os.system('%s "%s" "%s" "%s"' % (cmd, a, b, c))
1681 1675 if r:
1682 1676 self.ui.warn(_("merging %s failed!\n") % fn)
1683 1677
1684 1678 os.unlink(b)
1685 1679 os.unlink(c)
1686 1680 return r
1687 1681
1688 1682 def verify(self):
1689 1683 filelinkrevs = {}
1690 1684 filenodes = {}
1691 1685 changesets = revisions = files = 0
1692 1686 errors = [0]
1693 1687 neededmanifests = {}
1694 1688
1695 1689 def err(msg):
1696 1690 self.ui.warn(msg + "\n")
1697 1691 errors[0] += 1
1698 1692
1699 1693 def checksize(obj, name):
1700 1694 d = obj.checksize()
1701 1695 if d[0]:
1702 1696 err(_("%s data length off by %d bytes") % (name, d[0]))
1703 1697 if d[1]:
1704 1698 err(_("%s index contains %d extra bytes") % (name, d[1]))
1705 1699
1706 1700 seen = {}
1707 1701 self.ui.status(_("checking changesets\n"))
1708 1702 checksize(self.changelog, "changelog")
1709 1703
1710 1704 for i in range(self.changelog.count()):
1711 1705 changesets += 1
1712 1706 n = self.changelog.node(i)
1713 1707 l = self.changelog.linkrev(n)
1714 1708 if l != i:
1715 1709 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1716 1710 if n in seen:
1717 1711 err(_("duplicate changeset at revision %d") % i)
1718 1712 seen[n] = 1
1719 1713
1720 1714 for p in self.changelog.parents(n):
1721 1715 if p not in self.changelog.nodemap:
1722 1716 err(_("changeset %s has unknown parent %s") %
1723 1717 (short(n), short(p)))
1724 1718 try:
1725 1719 changes = self.changelog.read(n)
1726 1720 except KeyboardInterrupt:
1727 1721 self.ui.warn(_("interrupted"))
1728 1722 raise
1729 1723 except Exception, inst:
1730 1724 err(_("unpacking changeset %s: %s") % (short(n), inst))
1731 1725
1732 1726 neededmanifests[changes[0]] = n
1733 1727
1734 1728 for f in changes[3]:
1735 1729 filelinkrevs.setdefault(f, []).append(i)
1736 1730
1737 1731 seen = {}
1738 1732 self.ui.status(_("checking manifests\n"))
1739 1733 checksize(self.manifest, "manifest")
1740 1734
1741 1735 for i in range(self.manifest.count()):
1742 1736 n = self.manifest.node(i)
1743 1737 l = self.manifest.linkrev(n)
1744 1738
1745 1739 if l < 0 or l >= self.changelog.count():
1746 1740 err(_("bad manifest link (%d) at revision %d") % (l, i))
1747 1741
1748 1742 if n in neededmanifests:
1749 1743 del neededmanifests[n]
1750 1744
1751 1745 if n in seen:
1752 1746 err(_("duplicate manifest at revision %d") % i)
1753 1747
1754 1748 seen[n] = 1
1755 1749
1756 1750 for p in self.manifest.parents(n):
1757 1751 if p not in self.manifest.nodemap:
1758 1752 err(_("manifest %s has unknown parent %s") %
1759 1753 (short(n), short(p)))
1760 1754
1761 1755 try:
1762 1756 delta = mdiff.patchtext(self.manifest.delta(n))
1763 1757 except KeyboardInterrupt:
1764 1758 self.ui.warn(_("interrupted"))
1765 1759 raise
1766 1760 except Exception, inst:
1767 1761 err(_("unpacking manifest %s: %s") % (short(n), inst))
1768 1762
1769 1763 ff = [ l.split('\0') for l in delta.splitlines() ]
1770 1764 for f, fn in ff:
1771 1765 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1772 1766
1773 1767 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1774 1768
1775 1769 for m, c in neededmanifests.items():
1776 1770 err(_("Changeset %s refers to unknown manifest %s") %
1777 1771 (short(m), short(c)))
1778 1772 del neededmanifests
1779 1773
1780 1774 for f in filenodes:
1781 1775 if f not in filelinkrevs:
1782 1776 err(_("file %s in manifest but not in changesets") % f)
1783 1777
1784 1778 for f in filelinkrevs:
1785 1779 if f not in filenodes:
1786 1780 err(_("file %s in changeset but not in manifest") % f)
1787 1781
1788 1782 self.ui.status(_("checking files\n"))
1789 1783 ff = filenodes.keys()
1790 1784 ff.sort()
1791 1785 for f in ff:
1792 1786 if f == "/dev/null":
1793 1787 continue
1794 1788 files += 1
1795 1789 fl = self.file(f)
1796 1790 checksize(fl, f)
1797 1791
1798 1792 nodes = {nullid: 1}
1799 1793 seen = {}
1800 1794 for i in range(fl.count()):
1801 1795 revisions += 1
1802 1796 n = fl.node(i)
1803 1797
1804 1798 if n in seen:
1805 1799 err(_("%s: duplicate revision %d") % (f, i))
1806 1800 if n not in filenodes[f]:
1807 1801 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1808 1802 else:
1809 1803 del filenodes[f][n]
1810 1804
1811 1805 flr = fl.linkrev(n)
1812 1806 if flr not in filelinkrevs[f]:
1813 1807 err(_("%s:%s points to unexpected changeset %d")
1814 1808 % (f, short(n), flr))
1815 1809 else:
1816 1810 filelinkrevs[f].remove(flr)
1817 1811
1818 1812 # verify contents
1819 1813 try:
1820 1814 t = fl.read(n)
1821 1815 except KeyboardInterrupt:
1822 1816 self.ui.warn(_("interrupted"))
1823 1817 raise
1824 1818 except Exception, inst:
1825 1819 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1826 1820
1827 1821 # verify parents
1828 1822 (p1, p2) = fl.parents(n)
1829 1823 if p1 not in nodes:
1830 1824 err(_("file %s:%s unknown parent 1 %s") %
1831 1825 (f, short(n), short(p1)))
1832 1826 if p2 not in nodes:
1833 1827 err(_("file %s:%s unknown parent 2 %s") %
1834 1828 (f, short(n), short(p1)))
1835 1829 nodes[n] = 1
1836 1830
1837 1831 # cross-check
1838 1832 for node in filenodes[f]:
1839 1833 err(_("node %s in manifests not in %s") % (hex(node), f))
1840 1834
1841 1835 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1842 1836 (files, changesets, revisions))
1843 1837
1844 1838 if errors[0]:
1845 1839 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1846 1840 return 1
General Comments 0
You need to be logged in to leave comments. Login now