##// END OF EJS Templates
fix minor bugs in localrepo.hook.
Vadim Gelfer -
r2190:b67fcd91 default
parent child Browse files
Show More
@@ -1,2069 +1,2069
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import os, util
9 9 import filelog, manifest, changelog, dirstate, repo
10 10 from node import *
11 11 from i18n import gettext as _
12 12 from demandload import *
13 13 demandload(globals(), "appendfile changegroup")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "revlog traceback")
16 16
17 17 class localrepository(object):
    def __del__(self):
        # Clear the transaction handle on teardown — presumably to break
        # the repo -> transaction reference cycle so journal resources
        # are released promptly; TODO confirm original intent.
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or create) the repository at path.

        parentui: ui instance this repository's ui is derived from.
        path: repository root; when None, search upward from the cwd
              for a directory containing ".hg".
        create: when true, create the ".hg" skeleton on disk.

        Raises repo.RepoError if no repository is found.
        """
        if not path:
            # walk up the directory tree until a ".hg" dir is found
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    # reached the filesystem root without finding a repo
                    raise repo.RepoError(_("no repo found"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise repo.RepoError(_("repository %s not found") % path)

        self.root = os.path.abspath(path)
        self.origroot = path
        self.ui = ui.ui(parentui=parentui)
        # opener for files inside .hg, wopener for the working directory
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        # per-repository configuration; a missing hgrc is not an error
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            pass

        # revlog format version plus optional feature flags from config
        v = self.ui.revlogopts
        self.revlogversion = int(v.get('format', revlog.REVLOGV0))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        flags = 0
        for x in v.get('flags', "").split():
            flags |= revlog.flagstr(x)

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.opener, v)
        self.changelog = changelog.changelog(self.opener, v)

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily-populated caches; reload() invalidates the tag caches
        self.tagscache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
76 76
77 77 def hook(self, name, throw=False, **args):
78 78 def callhook(hname, funcname):
79 79 '''call python hook. hook is callable object, looked up as
80 80 name in python module. if callable returns "true", hook
81 81 passes, else fails. if hook raises exception, treated as
82 82 hook failure. exception propagates if throw is "true".'''
83 83
84 84 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
85 85 d = funcname.rfind('.')
86 86 if d == -1:
87 87 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
88 88 % (hname, funcname))
89 89 modname = funcname[:d]
90 90 try:
91 91 obj = __import__(modname)
92 92 except ImportError:
93 93 raise util.Abort(_('%s hook is invalid '
94 94 '(import of "%s" failed)') %
95 95 (hname, modname))
96 96 try:
97 97 for p in funcname.split('.')[1:]:
98 98 obj = getattr(obj, p)
99 99 except AttributeError, err:
100 100 raise util.Abort(_('%s hook is invalid '
101 101 '("%s" is not defined)') %
102 102 (hname, funcname))
103 103 if not callable(obj):
104 104 raise util.Abort(_('%s hook is invalid '
105 105 '("%s" is not callable)') %
106 106 (hname, funcname))
107 107 try:
108 r = obj(ui=ui, repo=repo, hooktype=name, **args)
108 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
109 109 except (KeyboardInterrupt, util.SignalInterrupt):
110 110 raise
111 111 except Exception, exc:
112 112 if isinstance(exc, util.Abort):
113 113 self.ui.warn(_('error: %s hook failed: %s\n') %
114 114 (hname, exc.args[0] % exc.args[1:]))
115 115 else:
116 116 self.ui.warn(_('error: %s hook raised an exception: '
117 117 '%s\n') % (hname, exc))
118 118 if throw:
119 119 raise
120 120 if self.ui.traceback:
121 121 traceback.print_exc()
122 122 return False
123 123 if not r:
124 124 if throw:
125 125 raise util.Abort(_('%s hook failed') % hname)
126 126 self.ui.warn(_('error: %s hook failed\n') % hname)
127 127 return r
128 128
129 129 def runhook(name, cmd):
130 130 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
131 131 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()] +
132 132 [(k.upper(), v) for k, v in args.iteritems()])
133 133 r = util.system(cmd, environ=env, cwd=self.root)
134 134 if r:
135 135 desc, r = util.explain_exit(r)
136 136 if throw:
137 137 raise util.Abort(_('%s hook %s') % (name, desc))
138 138 self.ui.warn(_('error: %s hook %s\n') % (name, desc))
139 139 return False
140 140 return True
141 141
142 142 r = True
143 143 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
144 144 if hname.split(".", 1)[0] == name and cmd]
145 145 hooks.sort()
146 146 for hname, cmd in hooks:
147 147 if cmd.startswith('python:'):
148 148 r = callhook(hname, cmd[7:].strip()) and r
149 149 else:
150 150 r = runhook(hname, cmd) and r
151 151 return r
152 152
153 153 def tags(self):
154 154 '''return a mapping of tag to node'''
155 155 if not self.tagscache:
156 156 self.tagscache = {}
157 157
158 158 def parsetag(line, context):
159 159 if not line:
160 160 return
161 161 s = l.split(" ", 1)
162 162 if len(s) != 2:
163 163 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
164 164 return
165 165 node, key = s
166 166 try:
167 167 bin_n = bin(node)
168 168 except TypeError:
169 169 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
170 170 return
171 171 if bin_n not in self.changelog.nodemap:
172 172 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
173 173 return
174 174 self.tagscache[key.strip()] = bin_n
175 175
176 176 # read each head of the tags file, ending with the tip
177 177 # and add each tag found to the map, with "newer" ones
178 178 # taking precedence
179 179 fl = self.file(".hgtags")
180 180 h = fl.heads()
181 181 h.reverse()
182 182 for r in h:
183 183 count = 0
184 184 for l in fl.read(r).splitlines():
185 185 count += 1
186 186 parsetag(l, ".hgtags:%d" % count)
187 187
188 188 try:
189 189 f = self.opener("localtags")
190 190 count = 0
191 191 for l in f:
192 192 count += 1
193 193 parsetag(l, "localtags:%d" % count)
194 194 except IOError:
195 195 pass
196 196
197 197 self.tagscache['tip'] = self.changelog.tip()
198 198
199 199 return self.tagscache
200 200
201 201 def tagslist(self):
202 202 '''return a list of tags ordered by revision'''
203 203 l = []
204 204 for t, n in self.tags().items():
205 205 try:
206 206 r = self.changelog.rev(n)
207 207 except:
208 208 r = -2 # sort to the beginning of the list if unknown
209 209 l.append((r, t, n))
210 210 l.sort()
211 211 return [(t, n) for r, t, n in l]
212 212
213 213 def nodetags(self, node):
214 214 '''return the tags associated with a node'''
215 215 if not self.nodetagscache:
216 216 self.nodetagscache = {}
217 217 for t, n in self.tags().items():
218 218 self.nodetagscache.setdefault(n, []).append(t)
219 219 return self.nodetagscache.get(node, [])
220 220
221 221 def lookup(self, key):
222 222 try:
223 223 return self.tags()[key]
224 224 except KeyError:
225 225 try:
226 226 return self.changelog.lookup(key)
227 227 except:
228 228 raise repo.RepoError(_("unknown revision '%s'") % key)
229 229
    def dev(self):
        # st_dev of the .hg directory (identifies the filesystem/device
        # the repository store lives on)
        return os.stat(self.path).st_dev
232 232
    def local(self):
        # this repository is directly accessible on local disk (remote
        # repository classes presumably return False — part of the
        # shared repo interface)
        return True
235 235
    def join(self, f):
        # path of f inside the .hg directory
        return os.path.join(self.path, f)
238 238
    def wjoin(self, f):
        # path of f inside the working directory
        return os.path.join(self.root, f)
241 241
    def file(self, f):
        # return the filelog for tracked file f; manifest paths are
        # stored without a leading slash, so strip one if present
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.opener, f, self.revlogversion)
246 246
    def getcwd(self):
        # current working directory as tracked by the dirstate
        return self.dirstate.getcwd()
249 249
    def wfile(self, f, mode='r'):
        # open f relative to the working directory
        return self.wopener(f, mode)
252 252
    def wread(self, filename):
        """Read filename from the working directory, applying the first
        matching [encode] filter, if any."""
        if self.encodepats == None:
            # compile the [encode] patterns once and cache them
            l = []
            for pat, cmd in self.ui.configitems("encode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.encodepats = l

        data = self.wopener(filename, 'r').read()

        # first matching pattern wins
        for mf, cmd in self.encodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        return data
270 270
    def wwrite(self, filename, data, fd=None):
        """Write data to filename in the working directory, applying the
        first matching [decode] filter, if any.  When fd is given, write
        to that already-open file object instead of opening filename."""
        if self.decodepats == None:
            # compile the [decode] patterns once and cache them
            l = []
            for pat, cmd in self.ui.configitems("decode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.decodepats = l

        # first matching pattern wins
        for mf, cmd in self.decodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        if fd:
            return fd.write(data)
        return self.wopener(filename, 'w').write(data)
288 288
    def transaction(self):
        """Return a new transaction, or a nested handle on the one
        already running."""
        tr = self.transhandle
        if tr != None and tr.running():
            return tr.nest()

        # save dirstate for undo
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        # aftertrans renames journal files to undo files on close
        tr = transaction.transaction(self.ui.warn, self.opener,
                                     self.join("journal"),
                                     aftertrans(self.path))
        self.transhandle = tr
        return tr
306 306
    def recover(self):
        """Roll back an interrupted transaction, if one exists.

        Returns True if a journal was found and rolled back."""
        l = self.lock()
        if os.path.exists(self.join("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.opener, self.join("journal"))
            # drop caches that may now reference rolled-back data
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
317 317
    def undo(self, wlock=None):
        """Roll back the last completed transaction (the "undo" journal)
        and restore the dirstate saved alongside it."""
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.join("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.opener, self.join("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            # refresh in-memory state from the rolled-back store
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no undo information available\n"))
330 330
    def wreload(self):
        # re-read the dirstate from disk
        self.dirstate.read()
333 333
    def reload(self):
        # re-read changelog and manifest and invalidate the tag caches
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None
339 339
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the named lock file.

        When wait is true and the lock is held elsewhere, retry with a
        timeout (ui.timeout config, default 600s); otherwise LockHeld
        propagates.  releasefn runs on release, acquirefn right after
        acquisition.  Returns the lock object."""
        try:
            # first try without waiting at all
            l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %s\n") %
                         (desc, inst.args[0]))
            # default to 600 seconds timeout
            l = lock.lock(self.join(lockname),
                          int(self.ui.config("ui", "timeout") or 600),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
356 356
    def lock(self, wait=1):
        # repository (store) lock; reloads cached store state on acquire
        return self.do_lock("lock", wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
360 360
    def wlock(self, wait=1):
        # working-directory lock; writes the dirstate back on release
        # and re-reads it on acquire
        return self.do_lock("wlock", wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
365 365
    def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
        "determine whether a new filenode is needed"
        # returns (existing_entry, None, None) when the file content is
        # unchanged from a parent, else (None, fp1, fp2) — the parents
        # for the new filenode
        fp1 = manifest1.get(filename, nullid)
        fp2 = manifest2.get(filename, nullid)

        if fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = filelog.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and text == filelog.read(fp1):
            return (fp1, None, None)

        return (None, fp1, fp2)
384 384
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        """Commit files with explicitly given parents, bypassing the
        usual working-directory checks.

        The dirstate is only updated when p1 is still the dirstate's
        first parent; files that cannot be read from the working dir
        are treated as removed.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        # only touch the dirstate when committing on top of it
        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # unchanged from a parent: reuse the existing filenode
                    mm[f] = entry
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # unreadable in the working dir: drop from the manifest
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
440 440
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None):
        """Commit changes to the repository.

        files: explicit list of files to commit; when None, commit
               everything self.changes(match) reports as changed.
        text:  commit message; when empty, open the user's editor.
        force: allow a commit even when nothing changed.

        Runs the precommit, pretxncommit and commit hooks.  Returns the
        new changeset node, or None if there was nothing to commit or
        the edited message was empty.
        """
        commit = []
        remove = []
        changed = []

        if files:
            # trust the caller's list, but only for files we track
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            modified, added, removed, deleted, unknown = self.changes(match=match)
            commit = modified + added
            remove = removed

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        # a merge (p2 set) may legitimately change no files
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status(_("nothing changed\n"))
            return None

        # hex parents for the hooks
        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wread(f)
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

            r = self.file(f)

            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                # record the copy source and the revision it came from
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
                fp1, fp2 = nullid, nullid
            else:
                entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # unchanged from a parent: reuse its filenode
                    new[f] = entry
                    continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1 = m1.copy()
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text:
            # build a message template and let the user edit it
            edittext = [""]
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            edittext = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)
            if not edittext.rstrip():
                # NOTE(review): returns with the transaction still open;
                # presumably cleaned up by the journal machinery — TODO
                # confirm
                return None
            text = edittext

        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
560 560
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        """Yield (source, filename) pairs for matching files.

        With node, walk that revision's manifest (source 'm', or 'b'
        for badmatch-accepted missing files); otherwise delegate to the
        dirstate walk of the working directory.  The files=[] default
        appears read-only here (only passed to fromkeys/dirstate.walk)
        — TODO confirm dirstate.walk does not mutate it.
        """
        if node:
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                fdict.pop(fn, None)
                if match(fn):
                    yield 'm', fn
            # whatever is left in fdict was named explicitly but does
            # not exist in this revision's manifest
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
578 578
    def changes(self, node1=None, node2=None, files=[], match=util.always,
                wlock=None, show_ignored=None):
        """return changes between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns sorted (modified, added, removed, deleted, unknown)
        lists, plus ignored when show_ignored is set.
        """

        def fcmp(fn, mf):
            # compare working-dir contents against the committed data
            t1 = self.wread(fn)
            t2 = self.file(fn).read(mf.get(fn, nullid))
            return cmp(t1, t2)

        def mfmatches(node):
            # manifest of node, restricted to files accepted by match
            change = self.changelog.read(node)
            mf = dict(self.manifest.read(change[0]))
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if node1:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                # best effort only: without the wlock we simply skip the
                # dirstate-refresh optimization below
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            lookup, modified, added, removed, deleted, unknown, ignored = (
                self.dirstate.changes(files, match, show_ignored))

            # are we comparing working dir against its parent?
            if not node1:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        elif wlock is not None:
                            # file turned out clean: record that in the
                            # dirstate, but only while holding the wlock
                            self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            deleted, unknown, ignored = [], [], []
            mf2 = mfmatches(node2)

        if node1:
            # flush lists from dirstate before comparing manifests
            modified, added = [], []

            for fn in mf2:
                if mf1.has_key(fn):
                    # "" marks a working-dir pseudo-entry: fall back to a
                    # content compare instead of trusting the hash
                    if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
                        modified.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # anything left in mf1 is absent from mf2, hence removed
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored:
            l.sort()
        if show_ignored is None:
            return (modified, added, removed, deleted, unknown)
        else:
            return (modified, added, removed, deleted, unknown, ignored)
661 661
    def add(self, list, wlock=None):
        """Schedule the given files for addition at the next commit."""
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if not os.path.exists(p):
                self.ui.warn(_("%s does not exist!\n") % f)
            elif not os.path.isfile(p):
                # directories, symlinks etc. are rejected here
                self.ui.warn(_("%s not added: only files supported currently\n")
                             % f)
            elif self.dirstate.state(f) in 'an':
                self.ui.warn(_("%s already tracked!\n") % f)
            else:
                self.dirstate.update([f], "a")
676 676
677 677 def forget(self, list, wlock=None):
678 678 if not wlock:
679 679 wlock = self.wlock()
680 680 for f in list:
681 681 if self.dirstate.state(f) not in 'ai':
682 682 self.ui.warn(_("%s not added!\n") % f)
683 683 else:
684 684 self.dirstate.forget([f])
685 685
    def remove(self, list, unlink=False, wlock=None):
        """Schedule files for removal; with unlink=True also delete them
        from the working directory first."""
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # a file that is already gone is fine
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                # without unlink the caller must have deleted it already
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                # never committed: just drop the pending add
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")
706 706
707 707 def undelete(self, list, wlock=None):
708 708 p = self.dirstate.parents()[0]
709 709 mn = self.changelog.read(p)[0]
710 710 mf = self.manifest.readflags(mn)
711 711 m = self.manifest.read(mn)
712 712 if not wlock:
713 713 wlock = self.wlock()
714 714 for f in list:
715 715 if self.dirstate.state(f) not in "r":
716 716 self.ui.warn("%s not removed!\n" % f)
717 717 else:
718 718 t = self.file(f).read(m[f])
719 719 self.wwrite(f, t)
720 720 util.set_exec(self.wjoin(f), mf[f])
721 721 self.dirstate.update([f], "n")
722 722
    def copy(self, source, dest, wlock=None):
        """Record that dest was copied from source; dest must already
        exist as a regular file in the working directory."""
        p = self.wjoin(dest)
        if not os.path.exists(p):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not os.path.isfile(p):
            self.ui.warn(_("copy failed: %s is not a file\n") % dest)
        else:
            if not wlock:
                wlock = self.wlock()
            # make sure dest is tracked before recording the copy
            if self.dirstate.state(dest) == '?':
                self.dirstate.update([dest], "a")
            self.dirstate.copy(source, dest)
735 735
736 736 def heads(self, start=None):
737 737 heads = self.changelog.heads(start)
738 738 # sort the output in rev descending order
739 739 heads = [(-self.changelog.rev(h), h) for h in heads]
740 740 heads.sort()
741 741 return [n for (r, n) in heads]
742 742
743 743 # branchlookup returns a dict giving a list of branches for
744 744 # each head. A branch is defined as the tag of a node or
745 745 # the branch of the node's parents. If a node has multiple
746 746 # branch tags, tags are eliminated if they are visible from other
747 747 # branch tags.
748 748 #
749 749 # So, for this graph: a->b->c->d->e
750 750 # \ /
751 751 # aa -----/
752 752 # a has tag 2.6.12
753 753 # d has tag 2.6.13
754 754 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
755 755 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
756 756 # from the list.
757 757 #
758 758 # It is possible that more than one head will have the same branch tag.
759 759 # callers need to check the result for multiple heads under the same
760 760 # branch tag if that is a problem for them (ie checkout of a specific
761 761 # branch).
762 762 #
763 763 # passing in a specific branch will limit the depth of the search
764 764 # through the parents. It won't limit the branches returned in the
765 765 # result though.
    def branchlookup(self, heads=None, branch=None):
        """Map each head to the list of branch tags visible from it.

        See the comment block above this method for the full semantics
        of branches, tag visibility and the `branch` depth limit.
        """
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume a deferred walk down a merge's second parent,
                # carrying the tags found so far
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        # record this tagged node as visible from every
                        # tag/head collected so far (and from itself)
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        # reached the requested branch: stop descending
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    # defer the second parent of a merge to a later pass
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # transitive closure of tags visible from node (cached)
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
848 848
    def branches(self, nodes):
        """For each node, follow first-parents to the end of its linear
        segment and return (head, segment-root, parent1, parent2)
        tuples.  Defaults to the tip when nodes is empty."""
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            # walk first-parents until we hit a merge or the null root
            while n:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
862 862
863 863 def between(self, pairs):
864 864 r = []
865 865
866 866 for top, bottom in pairs:
867 867 n, l, i = top, [], 0
868 868 f = 1
869 869
870 870 while n != bottom:
871 871 p = self.changelog.parents(n)[0]
872 872 if i == f:
873 873 l.append(n)
874 874 f = f * 2
875 875 n = p
876 876 i += 1
877 877
878 878 r.append(l)
879 879
880 880 return r
881 881
882 882 def findincoming(self, remote, base=None, heads=None, force=False):
883 883 m = self.changelog.nodemap
884 884 search = []
885 885 fetch = {}
886 886 seen = {}
887 887 seenbranch = {}
888 888 if base == None:
889 889 base = {}
890 890
891 891 if not heads:
892 892 heads = remote.heads()
893 893
894 894 if self.changelog.tip() == nullid:
895 895 if heads != [nullid]:
896 896 return [nullid]
897 897 return []
898 898
899 899 # assume we're closer to the tip than the root
900 900 # and start by examining the heads
901 901 self.ui.status(_("searching for changes\n"))
902 902
903 903 unknown = []
904 904 for h in heads:
905 905 if h not in m:
906 906 unknown.append(h)
907 907 else:
908 908 base[h] = 1
909 909
910 910 if not unknown:
911 911 return []
912 912
913 913 rep = {}
914 914 reqcnt = 0
915 915
916 916 # search through remote branches
917 917 # a 'branch' here is a linear segment of history, with four parts:
918 918 # head, root, first parent, second parent
919 919 # (a branch always has two parents (or none) by definition)
920 920 unknown = remote.branches(unknown)
921 921 while unknown:
922 922 r = []
923 923 while unknown:
924 924 n = unknown.pop(0)
925 925 if n[0] in seen:
926 926 continue
927 927
928 928 self.ui.debug(_("examining %s:%s\n")
929 929 % (short(n[0]), short(n[1])))
930 930 if n[0] == nullid:
931 931 break
932 932 if n in seenbranch:
933 933 self.ui.debug(_("branch already found\n"))
934 934 continue
935 935 if n[1] and n[1] in m: # do we know the base?
936 936 self.ui.debug(_("found incomplete branch %s:%s\n")
937 937 % (short(n[0]), short(n[1])))
938 938 search.append(n) # schedule branch range for scanning
939 939 seenbranch[n] = 1
940 940 else:
941 941 if n[1] not in seen and n[1] not in fetch:
942 942 if n[2] in m and n[3] in m:
943 943 self.ui.debug(_("found new changeset %s\n") %
944 944 short(n[1]))
945 945 fetch[n[1]] = 1 # earliest unknown
946 946 base[n[2]] = 1 # latest known
947 947 continue
948 948
949 949 for a in n[2:4]:
950 950 if a not in rep:
951 951 r.append(a)
952 952 rep[a] = 1
953 953
954 954 seen[n[0]] = 1
955 955
956 956 if r:
957 957 reqcnt += 1
958 958 self.ui.debug(_("request %d: %s\n") %
959 959 (reqcnt, " ".join(map(short, r))))
960 960 for p in range(0, len(r), 10):
961 961 for b in remote.branches(r[p:p+10]):
962 962 self.ui.debug(_("received %s:%s\n") %
963 963 (short(b[0]), short(b[1])))
964 964 if b[0] in m:
965 965 self.ui.debug(_("found base node %s\n")
966 966 % short(b[0]))
967 967 base[b[0]] = 1
968 968 elif b[0] not in seen:
969 969 unknown.append(b)
970 970
971 971 # do binary search on the branches we found
972 972 while search:
973 973 n = search.pop(0)
974 974 reqcnt += 1
975 975 l = remote.between([(n[0], n[1])])[0]
976 976 l.append(n[1])
977 977 p = n[0]
978 978 f = 1
979 979 for i in l:
980 980 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
981 981 if i in m:
982 982 if f <= 2:
983 983 self.ui.debug(_("found new branch changeset %s\n") %
984 984 short(p))
985 985 fetch[p] = 1
986 986 base[i] = 1
987 987 else:
988 988 self.ui.debug(_("narrowed branch search to %s:%s\n")
989 989 % (short(p), short(i)))
990 990 search.append((p, i))
991 991 break
992 992 p, f = i, f * 2
993 993
994 994 # sanity check our fetch list
995 995 for f in fetch.keys():
996 996 if f in m:
997 997 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
998 998
999 999 if base.keys() == [nullid]:
1000 1000 if force:
1001 1001 self.ui.warn(_("warning: repository is unrelated\n"))
1002 1002 else:
1003 1003 raise util.Abort(_("repository is unrelated"))
1004 1004
1005 1005 self.ui.note(_("found new changesets starting at ") +
1006 1006 " ".join([short(f) for f in fetch]) + "\n")
1007 1007
1008 1008 self.ui.debug(_("%d total queries\n") % reqcnt)
1009 1009
1010 1010 return fetch.keys()
1011 1011
1012 1012 def findoutgoing(self, remote, base=None, heads=None, force=False):
1013 1013 """Return list of nodes that are roots of subsets not in remote
1014 1014
1015 1015 If base dict is specified, assume that these nodes and their parents
1016 1016 exist on the remote side.
1017 1017 If a list of heads is specified, return only nodes which are heads
1018 1018 or ancestors of these heads, and return a second element which
1019 1019 contains all remote heads which get new children.
1020 1020 """
1021 1021 if base == None:
1022 1022 base = {}
1023 1023 self.findincoming(remote, base, heads, force=force)
1024 1024
1025 1025 self.ui.debug(_("common changesets up to ")
1026 1026 + " ".join(map(short, base.keys())) + "\n")
1027 1027
1028 1028 remain = dict.fromkeys(self.changelog.nodemap)
1029 1029
1030 1030 # prune everything remote has from the tree
1031 1031 del remain[nullid]
1032 1032 remove = base.keys()
1033 1033 while remove:
1034 1034 n = remove.pop(0)
1035 1035 if n in remain:
1036 1036 del remain[n]
1037 1037 for p in self.changelog.parents(n):
1038 1038 remove.append(p)
1039 1039
1040 1040 # find every node whose parents have been pruned
1041 1041 subset = []
1042 1042 # find every remote head that will get new children
1043 1043 updated_heads = {}
1044 1044 for n in remain:
1045 1045 p1, p2 = self.changelog.parents(n)
1046 1046 if p1 not in remain and p2 not in remain:
1047 1047 subset.append(n)
1048 1048 if heads:
1049 1049 if p1 in heads:
1050 1050 updated_heads[p1] = True
1051 1051 if p2 in heads:
1052 1052 updated_heads[p2] = True
1053 1053
1054 1054 # this is the set of all roots we have to push
1055 1055 if heads:
1056 1056 return subset, updated_heads.keys()
1057 1057 else:
1058 1058 return subset
1059 1059
1060 1060 def pull(self, remote, heads=None, force=False):
1061 1061 l = self.lock()
1062 1062
1063 1063 fetch = self.findincoming(remote, force=force)
1064 1064 if fetch == [nullid]:
1065 1065 self.ui.status(_("requesting all changes\n"))
1066 1066
1067 1067 if not fetch:
1068 1068 self.ui.status(_("no changes found\n"))
1069 1069 return 0
1070 1070
1071 1071 if heads is None:
1072 1072 cg = remote.changegroup(fetch, 'pull')
1073 1073 else:
1074 1074 cg = remote.changegroupsubset(fetch, heads, 'pull')
1075 1075 return self.addchangegroup(cg)
1076 1076
1077 1077 def push(self, remote, force=False, revs=None):
1078 1078 lock = remote.lock()
1079 1079
1080 1080 base = {}
1081 1081 remote_heads = remote.heads()
1082 1082 inc = self.findincoming(remote, base, remote_heads, force=force)
1083 1083 if not force and inc:
1084 1084 self.ui.warn(_("abort: unsynced remote changes!\n"))
1085 1085 self.ui.status(_("(did you forget to sync?"
1086 1086 " use push -f to force)\n"))
1087 1087 return 1
1088 1088
1089 1089 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1090 1090 if revs is not None:
1091 1091 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1092 1092 else:
1093 1093 bases, heads = update, self.changelog.heads()
1094 1094
1095 1095 if not bases:
1096 1096 self.ui.status(_("no changes found\n"))
1097 1097 return 1
1098 1098 elif not force:
1099 1099 # FIXME we don't properly detect creation of new heads
1100 1100 # in the push -r case, assume the user knows what he's doing
1101 1101 if not revs and len(remote_heads) < len(heads) \
1102 1102 and remote_heads != [nullid]:
1103 1103 self.ui.warn(_("abort: push creates new remote branches!\n"))
1104 1104 self.ui.status(_("(did you forget to merge?"
1105 1105 " use push -f to force)\n"))
1106 1106 return 1
1107 1107
1108 1108 if revs is None:
1109 1109 cg = self.changegroup(update, 'push')
1110 1110 else:
1111 1111 cg = self.changegroupsubset(update, revs, 'push')
1112 1112 return remote.addchangegroup(cg)
1113 1113
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        bases: changenodes assumed present (with ancestors) on the recipient
        heads: changenodes bounding the group from above
        source: tag string passed through to the preoutgoing/outgoing hooks

        Returns a util.chunkbuffer wrapping the lazily-generated group.
        """

        # fires before any work is done; throw=True aborts on hook failure
        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming the a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we now the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        # NOTE: the outgoing hook fires eagerly here, before gengroup has
        # produced (or consumed) anything
        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1384 1384
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        basenodes: changenodes assumed present (with ancestors) on recipient
        source: tag string passed through to the preoutgoing/outgoing hooks

        Returns a util.chunkbuffer wrapping the lazily-generated group.
        """

        # fires before any work is done; throw=True aborts on hook failure
        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # every descendant of the bases goes into the group
        nodes = cl.nodesbetween(basenodes, None)[0]
        # ersatz set of the changelog revision numbers being sent
        revset = dict.fromkeys([cl.rev(n) for n in nodes])

        # a changenode is its own changelog link, so lookup is identity
        def identity(x):
            return x

        # yield revlog's nodes whose linkrev is in revset, in storage order
        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        # closure factory: record, into changedfileset, every file touched
        # by each outgoing changeset (c[3] is the changeset's file list)
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # closure factory: map a revlog node to its owning changenode
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        # lazily emit changelog chunks, then manifest chunks, then one named
        # chunk-group per changed file, then the closing chunk
        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                # materialize so we can skip files with nothing to send
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        # NOTE: fires eagerly, before the generator above runs
        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1450 1450
    def addchangegroup(self, source):
        """add changegroup to repo.

        source: stream of changegroup chunks, consumed via
        changegroup.chunkiter/getchunk.
        returns number of heads modified or added + 1."""

        # link mapper for incoming changesets: log each one and link it to
        # the next changelog index
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        # link mapper for manifests/filelogs: resolve against the (already
        # extended) changelog
        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog and manifest data to temp files so
        # concurrent readers will not see inconsistent view
        cl = appendfile.appendchangelog(self.opener, self.changelog.version)

        oldheads = len(cl.heads())

        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        co = cl.tip()
        chunkiter = changegroup.chunkiter(source)
        cn = cl.addgroup(chunkiter, csmap, tr, 1) # unique
        # cor..cnr is the revision range that was just added
        cnr, cor = map(cl.rev, (cn, co))
        if cn == nullid:
            cnr = cor
        changesets = cnr - cor

        mf = appendfile.appendmanifest(self.opener, self.manifest.version)

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        mm = mf.tip()
        chunkiter = changegroup.chunkiter(source)
        # NOTE(review): mm and mo appear unused afterwards
        mo = mf.addgroup(chunkiter, revmap, tr)

        # process the files
        self.ui.status(_("adding file changes\n"))
        while 1:
            f = changegroup.getchunk(source)
            if not f:
                break
            self.ui.debug(_("adding %s revisions\n") % f)
            fl = self.file(f)
            o = fl.count()
            chunkiter = changegroup.chunkiter(source)
            n = fl.addgroup(chunkiter, revmap, tr)
            revisions += fl.count() - o
            files += 1

        # write order here is important so concurrent readers will see
        # consistent view of repo
        mf.writedata()
        cl.writedata()

        # make changelog and manifest see real files again
        self.changelog = changelog.changelog(self.opener, self.changelog.version)
        self.manifest = manifest.manifest(self.opener, self.manifest.version)
        self.changelog.checkinlinesize(tr)
        self.manifest.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads > oldheads:
            heads = _(" (+%d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                         % (changesets, revisions, files, heads))

        # last chance to veto before the transaction commits
        self.hook('pretxnchangegroup', throw=True,
                  node=hex(self.changelog.node(cor+1)))

        tr.close()

        if changesets > 0:
            # changegroup fires once for the first new changeset ...
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)))

            # ... incoming fires once per new changeset
            for i in range(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)))

        return newheads - oldheads + 1
1541 1541
    def update(self, node, allow=False, force=False, choose=None,
               moddirstate=True, forcemerge=False, wlock=None, show_stats=True):
        """Update the working directory to changeset `node'.

        allow: permit a branch-crossing merge instead of aborting
        force: override safety checks and clobber local changes
        choose: optional predicate limiting which files are considered
        moddirstate: when False, leave the dirstate untouched
        forcemerge: skip the uncommitted-changes / content checks
        wlock: pre-acquired working-dir lock (acquired here if needed)
        show_stats: print the updated/merged/removed/unresolved summary

        Returns 1 when the update is refused; otherwise True if any file
        merge failed, False on full success.
        """
        pl = self.dirstate.parents()
        if not force and pl[1] != nullid:
            self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
            return 1

        err = False

        # read manifests and flags for local (m1), target (m2) and their
        # common ancestor (ma)
        p1, p2 = pl[0], node
        pa = self.changelog.ancestor(p1, p2)
        m1n = self.changelog.read(p1)[0]
        m2n = self.changelog.read(p2)[0]
        man = self.manifest.ancestor(m1n, m2n)
        m1 = self.manifest.read(m1n)
        mf1 = self.manifest.readflags(m1n)
        m2 = self.manifest.read(m2n).copy()
        mf2 = self.manifest.readflags(m2n)
        ma = self.manifest.read(man)
        mfa = self.manifest.readflags(man)

        modified, added, removed, deleted, unknown = self.changes()

        # is this a jump, or a merge? i.e. is there a linear path
        # from p1 to p2?
        linear_path = (pa == p1 or pa == p2)

        if allow and linear_path:
            raise util.Abort(_("there is nothing to merge, "
                               "just use 'hg update'"))
        if allow and not forcemerge:
            if modified or added or removed:
                raise util.Abort(_("outstanding uncommitted changes"))
        if not forcemerge and not force:
            for f in unknown:
                if f in m2:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) != 0:
                        raise util.Abort(_("'%s' already exists in the working"
                                           " dir and differs from remote") % f)

        # resolve the manifest to determine which files
        # we care about merging
        self.ui.note(_("resolving manifests\n"))
        self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
                      (force, allow, moddirstate, linear_path))
        self.ui.debug(_(" ancestor %s local %s remote %s\n") %
                      (short(man), short(m1n), short(m2n)))

        merge = {}
        get = {}
        remove = []

        # construct a working dir manifest
        mw = m1.copy()
        mfw = mf1.copy()
        umap = dict.fromkeys(unknown)

        for f in added + modified + unknown:
            mw[f] = ""
            mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

        if moddirstate and not wlock:
            wlock = self.wlock()

        for f in deleted + removed:
            if f in mw:
                del mw[f]

            # If we're jumping between revisions (as opposed to merging),
            # and if neither the working directory nor the target rev has
            # the file, then we need to remove it from the dirstate, to
            # prevent the dirstate from listing the file when it is no
            # longer in the manifest.
            if moddirstate and linear_path and f not in m2:
                self.dirstate.forget((f,))

        # Compare manifests
        for f, n in mw.iteritems():
            if choose and not choose(f):
                continue
            if f in m2:
                s = 0

                # is the wfile new since m1, and match m2?
                if f not in m1:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) == 0:
                        n = m2[f]
                    del t1, t2

                # are files different?
                if n != m2[f]:
                    a = ma.get(f, nullid)
                    # are both different from the ancestor?
                    if n != a and m2[f] != a:
                        self.ui.debug(_(" %s versions differ, resolve\n") % f)
                        # merge executable bits
                        # "if we changed or they changed, change in merge"
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        merge[f] = (m1.get(f, nullid), m2[f], mode)
                        s = 1
                    # are we clobbering?
                    # is remote's version newer?
                    # or are we going back in time?
                    elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                        self.ui.debug(_(" remote %s is newer, get\n") % f)
                        get[f] = m2[f]
                        s = 1
                elif f in umap or f in added:
                    # this unknown file is the same as the checkout
                    # we need to reset the dirstate if the file was added
                    get[f] = m2[f]

                if not s and mfw[f] != mf2[f]:
                    if force:
                        self.ui.debug(_(" updating permissions for %s\n") % f)
                        util.set_exec(self.wjoin(f), mf2[f])
                    else:
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        if mode != b:
                            self.ui.debug(_(" updating permissions for %s\n")
                                          % f)
                            util.set_exec(self.wjoin(f), mode)
                del m2[f]
            elif f in ma:
                if n != ma[f]:
                    r = _("d")
                    if not force and (linear_path or allow):
                        r = self.ui.prompt(
                            (_(" local changed %s which remote deleted\n") % f) +
                            _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                    if r == _("d"):
                        remove.append(f)
                else:
                    self.ui.debug(_("other deleted %s\n") % f)
                    remove.append(f) # other deleted it
            else:
                # file is created on branch or in working directory
                if force and f not in umap:
                    self.ui.debug(_("remote deleted %s, clobbering\n") % f)
                    remove.append(f)
                elif n == m1.get(f, nullid): # same as parent
                    if p2 == pa: # going backwards?
                        self.ui.debug(_("remote deleted %s\n") % f)
                        remove.append(f)
                    else:
                        self.ui.debug(_("local modified %s, keeping\n") % f)
                else:
                    self.ui.debug(_("working dir created %s, keeping\n") % f)

        # files only present in the target manifest (m2 entries handled
        # above were deleted as they were matched)
        for f, n in m2.iteritems():
            if choose and not choose(f):
                continue
            if f[0] == "/":
                continue
            if f in ma and n != ma[f]:
                r = _("k")
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        (_("remote changed %s which local deleted\n") % f) +
                        _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                if r == _("k"):
                    get[f] = n
            elif f not in ma:
                self.ui.debug(_("remote created %s\n") % f)
                get[f] = n
            else:
                if force or p2 == pa: # going backwards?
                    self.ui.debug(_("local deleted %s, recreating\n") % f)
                    get[f] = n
                else:
                    self.ui.debug(_("local deleted %s\n") % f)

        del mw, m1, m2, ma

        if force:
            for f in merge:
                get[f] = merge[f][1]
            merge = {}

        if linear_path or force:
            # we don't need to do any magic, just jump to the new rev
            branch_merge = False
            p1, p2 = p2, nullid
        else:
            if not allow:
                self.ui.status(_("this update spans a branch"
                                 " affecting the following files:\n"))
                fl = merge.keys() + get.keys()
                fl.sort()
                for f in fl:
                    cf = ""
                    if f in merge:
                        cf = _(" (resolve)")
                    self.ui.status(" %s%s\n" % (f, cf))
                self.ui.warn(_("aborting update spanning branches!\n"))
                self.ui.status(_("(use 'hg merge' to merge across branches"
                                 " or 'hg update -C' to lose changes)\n"))
                return 1
            branch_merge = True

        # get the files we don't need to change
        files = get.keys()
        files.sort()
        for f in files:
            if f[0] == "/":
                continue
            self.ui.note(_("getting %s\n") % f)
            t = self.file(f).read(get[f])
            self.wwrite(f, t)
            util.set_exec(self.wjoin(f), mf2[f])
            if moddirstate:
                if branch_merge:
                    self.dirstate.update([f], 'n', st_mtime=-1)
                else:
                    self.dirstate.update([f], 'n')

        # merge the tricky bits
        failedmerge = []
        files = merge.keys()
        files.sort()
        xp1 = hex(p1)
        xp2 = hex(p2)
        for f in files:
            self.ui.status(_("merging %s\n") % f)
            my, other, flag = merge[f]
            ret = self.merge3(f, my, other, xp1, xp2)
            if ret:
                err = True
                failedmerge.append(f)
            util.set_exec(self.wjoin(f), flag)
            if moddirstate:
                if branch_merge:
                    # We've done a branch merge, mark this file as merged
                    # so that we properly record the merger later
                    self.dirstate.update([f], 'm')
                else:
                    # We've update-merged a locally modified file, so
                    # we set the dirstate to emulate a normal checkout
                    # of that file some time in the past. Thus our
                    # merge will appear as a normal local file
                    # modification.
                    f_len = len(self.file(f).read(other))
                    self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)

        remove.sort()
        for f in remove:
            self.ui.note(_("removing %s\n") % f)
            util.audit_path(f)
            try:
                util.unlink(self.wjoin(f))
            except OSError, inst:
                if inst.errno != errno.ENOENT:
                    self.ui.warn(_("update failed to remove %s: %s!\n") %
                                 (f, inst.strerror))
        if moddirstate:
            if branch_merge:
                self.dirstate.update(remove, 'r')
            else:
                self.dirstate.forget(remove)

        if moddirstate:
            self.dirstate.setparents(p1, p2)

        if show_stats:
            stats = ((len(get), _("updated")),
                     (len(merge) - len(failedmerge), _("merged")),
                     (len(remove), _("removed")),
                     (len(failedmerge), _("unresolved")))
            note = ", ".join([_("%d files %s") % s for s in stats])
            self.ui.status("%s\n" % note)
        if moddirstate:
            if branch_merge:
                if failedmerge:
                    self.ui.status(_("There are unresolved merges,"
                                    " you can redo the full merge using:\n"
                                    " hg update -C %s\n"
                                    " hg merge %s\n"
                                    % (self.changelog.rev(p1),
                                        self.changelog.rev(p2))))
                else:
                    self.ui.status(_("(branch merge, don't forget to commit)\n"))
            elif failedmerge:
                self.ui.status(_("There are unresolved merges with"
                                 " locally modified files.\n"))

        return err
1834 1834
1835 1835 def merge3(self, fn, my, other, p1, p2):
1836 1836 """perform a 3-way merge in the working directory"""
1837 1837
1838 1838 def temp(prefix, node):
1839 1839 pre = "%s~%s." % (os.path.basename(fn), prefix)
1840 1840 (fd, name) = tempfile.mkstemp(prefix=pre)
1841 1841 f = os.fdopen(fd, "wb")
1842 1842 self.wwrite(fn, fl.read(node), f)
1843 1843 f.close()
1844 1844 return name
1845 1845
1846 1846 fl = self.file(fn)
1847 1847 base = fl.ancestor(my, other)
1848 1848 a = self.wjoin(fn)
1849 1849 b = temp("base", base)
1850 1850 c = temp("other", other)
1851 1851
1852 1852 self.ui.note(_("resolving %s\n") % fn)
1853 1853 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1854 1854 (fn, short(my), short(other), short(base)))
1855 1855
1856 1856 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1857 1857 or "hgmerge")
1858 1858 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1859 1859 environ={'HG_FILE': fn,
1860 1860 'HG_MY_NODE': p1,
1861 1861 'HG_OTHER_NODE': p2,
1862 1862 'HG_FILE_MY_NODE': hex(my),
1863 1863 'HG_FILE_OTHER_NODE': hex(other),
1864 1864 'HG_FILE_BASE_NODE': hex(base)})
1865 1865 if r:
1866 1866 self.ui.warn(_("merging %s failed!\n") % fn)
1867 1867
1868 1868 os.unlink(b)
1869 1869 os.unlink(c)
1870 1870 return r
1871 1871
1872 1872 def verify(self):
1873 1873 filelinkrevs = {}
1874 1874 filenodes = {}
1875 1875 changesets = revisions = files = 0
1876 1876 errors = [0]
1877 1877 warnings = [0]
1878 1878 neededmanifests = {}
1879 1879
1880 1880 def err(msg):
1881 1881 self.ui.warn(msg + "\n")
1882 1882 errors[0] += 1
1883 1883
1884 1884 def warn(msg):
1885 1885 self.ui.warn(msg + "\n")
1886 1886 warnings[0] += 1
1887 1887
1888 1888 def checksize(obj, name):
1889 1889 d = obj.checksize()
1890 1890 if d[0]:
1891 1891 err(_("%s data length off by %d bytes") % (name, d[0]))
1892 1892 if d[1]:
1893 1893 err(_("%s index contains %d extra bytes") % (name, d[1]))
1894 1894
1895 1895 def checkversion(obj, name):
1896 1896 if obj.version != revlog.REVLOGV0:
1897 1897 if not revlogv1:
1898 1898 warn(_("warning: `%s' uses revlog format 1") % name)
1899 1899 elif revlogv1:
1900 1900 warn(_("warning: `%s' uses revlog format 0") % name)
1901 1901
1902 1902 revlogv1 = self.revlogversion != revlog.REVLOGV0
1903 1903 if self.ui.verbose or revlogv1 != self.revlogv1:
1904 1904 self.ui.status(_("repository uses revlog format %d\n") %
1905 1905 (revlogv1 and 1 or 0))
1906 1906
1907 1907 seen = {}
1908 1908 self.ui.status(_("checking changesets\n"))
1909 1909 checksize(self.changelog, "changelog")
1910 1910
1911 1911 for i in range(self.changelog.count()):
1912 1912 changesets += 1
1913 1913 n = self.changelog.node(i)
1914 1914 l = self.changelog.linkrev(n)
1915 1915 if l != i:
1916 1916 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1917 1917 if n in seen:
1918 1918 err(_("duplicate changeset at revision %d") % i)
1919 1919 seen[n] = 1
1920 1920
1921 1921 for p in self.changelog.parents(n):
1922 1922 if p not in self.changelog.nodemap:
1923 1923 err(_("changeset %s has unknown parent %s") %
1924 1924 (short(n), short(p)))
1925 1925 try:
1926 1926 changes = self.changelog.read(n)
1927 1927 except KeyboardInterrupt:
1928 1928 self.ui.warn(_("interrupted"))
1929 1929 raise
1930 1930 except Exception, inst:
1931 1931 err(_("unpacking changeset %s: %s") % (short(n), inst))
1932 1932 continue
1933 1933
1934 1934 neededmanifests[changes[0]] = n
1935 1935
1936 1936 for f in changes[3]:
1937 1937 filelinkrevs.setdefault(f, []).append(i)
1938 1938
1939 1939 seen = {}
1940 1940 self.ui.status(_("checking manifests\n"))
1941 1941 checkversion(self.manifest, "manifest")
1942 1942 checksize(self.manifest, "manifest")
1943 1943
1944 1944 for i in range(self.manifest.count()):
1945 1945 n = self.manifest.node(i)
1946 1946 l = self.manifest.linkrev(n)
1947 1947
1948 1948 if l < 0 or l >= self.changelog.count():
1949 1949 err(_("bad manifest link (%d) at revision %d") % (l, i))
1950 1950
1951 1951 if n in neededmanifests:
1952 1952 del neededmanifests[n]
1953 1953
1954 1954 if n in seen:
1955 1955 err(_("duplicate manifest at revision %d") % i)
1956 1956
1957 1957 seen[n] = 1
1958 1958
1959 1959 for p in self.manifest.parents(n):
1960 1960 if p not in self.manifest.nodemap:
1961 1961 err(_("manifest %s has unknown parent %s") %
1962 1962 (short(n), short(p)))
1963 1963
1964 1964 try:
1965 1965 delta = mdiff.patchtext(self.manifest.delta(n))
1966 1966 except KeyboardInterrupt:
1967 1967 self.ui.warn(_("interrupted"))
1968 1968 raise
1969 1969 except Exception, inst:
1970 1970 err(_("unpacking manifest %s: %s") % (short(n), inst))
1971 1971 continue
1972 1972
1973 1973 try:
1974 1974 ff = [ l.split('\0') for l in delta.splitlines() ]
1975 1975 for f, fn in ff:
1976 1976 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1977 1977 except (ValueError, TypeError), inst:
1978 1978 err(_("broken delta in manifest %s: %s") % (short(n), inst))
1979 1979
1980 1980 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1981 1981
1982 1982 for m, c in neededmanifests.items():
1983 1983 err(_("Changeset %s refers to unknown manifest %s") %
1984 1984 (short(m), short(c)))
1985 1985 del neededmanifests
1986 1986
1987 1987 for f in filenodes:
1988 1988 if f not in filelinkrevs:
1989 1989 err(_("file %s in manifest but not in changesets") % f)
1990 1990
1991 1991 for f in filelinkrevs:
1992 1992 if f not in filenodes:
1993 1993 err(_("file %s in changeset but not in manifest") % f)
1994 1994
1995 1995 self.ui.status(_("checking files\n"))
1996 1996 ff = filenodes.keys()
1997 1997 ff.sort()
1998 1998 for f in ff:
1999 1999 if f == "/dev/null":
2000 2000 continue
2001 2001 files += 1
2002 2002 if not f:
2003 2003 err(_("file without name in manifest %s") % short(n))
2004 2004 continue
2005 2005 fl = self.file(f)
2006 2006 checkversion(fl, f)
2007 2007 checksize(fl, f)
2008 2008
2009 2009 nodes = {nullid: 1}
2010 2010 seen = {}
2011 2011 for i in range(fl.count()):
2012 2012 revisions += 1
2013 2013 n = fl.node(i)
2014 2014
2015 2015 if n in seen:
2016 2016 err(_("%s: duplicate revision %d") % (f, i))
2017 2017 if n not in filenodes[f]:
2018 2018 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
2019 2019 else:
2020 2020 del filenodes[f][n]
2021 2021
2022 2022 flr = fl.linkrev(n)
2023 2023 if flr not in filelinkrevs.get(f, []):
2024 2024 err(_("%s:%s points to unexpected changeset %d")
2025 2025 % (f, short(n), flr))
2026 2026 else:
2027 2027 filelinkrevs[f].remove(flr)
2028 2028
2029 2029 # verify contents
2030 2030 try:
2031 2031 t = fl.read(n)
2032 2032 except KeyboardInterrupt:
2033 2033 self.ui.warn(_("interrupted"))
2034 2034 raise
2035 2035 except Exception, inst:
2036 2036 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
2037 2037
2038 2038 # verify parents
2039 2039 (p1, p2) = fl.parents(n)
2040 2040 if p1 not in nodes:
2041 2041 err(_("file %s:%s unknown parent 1 %s") %
2042 2042 (f, short(n), short(p1)))
2043 2043 if p2 not in nodes:
2044 2044 err(_("file %s:%s unknown parent 2 %s") %
2045 2045 (f, short(n), short(p1)))
2046 2046 nodes[n] = 1
2047 2047
2048 2048 # cross-check
2049 2049 for node in filenodes[f]:
2050 2050 err(_("node %s in manifests not in %s") % (hex(node), f))
2051 2051
2052 2052 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
2053 2053 (files, changesets, revisions))
2054 2054
2055 2055 if warnings[0]:
2056 2056 self.ui.warn(_("%d warnings encountered!\n") % warnings[0])
2057 2057 if errors[0]:
2058 2058 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
2059 2059 return 1
2060 2060
# used to avoid circular references so destructors work
def aftertrans(base):
    """Return a callable that promotes journal files to undo files.

    Only the path string is captured by the closure (never the repo
    object), so completing a transaction cannot create a reference
    cycle that would keep destructors from running.
    """
    repodir = base
    def renamer():
        for old, new in (("journal", "undo"),
                         ("journal.dirstate", "undo.dirstate")):
            util.rename(os.path.join(repodir, old),
                        os.path.join(repodir, new))
    return renamer
2069 2069
General Comments 0
You need to be logged in to leave comments. Login now