##// END OF EJS Templates
changegroup hooks: add source to hook parameters
Vadim Gelfer -
r2229:0ff326c2 default
parent child Browse files
Show More
@@ -1,2076 +1,2078 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import os, util
9 9 import filelog, manifest, changelog, dirstate, repo
10 10 from node import *
11 11 from i18n import gettext as _
12 12 from demandload import *
13 13 demandload(globals(), "appendfile changegroup")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "revlog traceback")
16 16
17 17 class localrepository(object):
18 18 def __del__(self):
19 19 self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, create) the repository at path.

        With no path, walk upward from the cwd until a .hg directory
        is found.  Raises repo.RepoError if no repository exists and
        create is false.
        """
        if not path:
            p = os.getcwd()
            # climb toward the filesystem root looking for .hg
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    # dirname() hit a fixed point: we reached the root
                    raise repo.RepoError(_("no repo found"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise repo.RepoError(_("repository %s not found") % path)

        self.root = os.path.abspath(path)
        self.origroot = path
        self.ui = ui.ui(parentui=parentui)
        # opener resolves inside .hg; wopener inside the working dir
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            # a repository without an hgrc is perfectly valid
            pass

        # revlog format and feature flags come from the config
        v = self.ui.revlogopts
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.opener, v)
        self.changelog = changelog.changelog(self.opener, v)

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily-populated caches; None means "not yet computed"
        self.tagscache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
80 80
    def hook(self, name, throw=False, **args):
        """Run every [hooks] entry matching name ("name" or "name.suffix").

        'python:mod.func' entries are called in-process; anything else
        runs through the shell with args exported as HG_* environment
        variables.  Returns a true value if any hook failed; with
        throw=True a failure raises util.Abort instead.
        """
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                raise util.Abort(_('%s hook is invalid '
                                   '(import of "%s" failed)') %
                                 (hname, modname))
            try:
                # walk the dotted path down to the callable
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                # never swallow user interrupts
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0] % exc.args[1:]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                if self.ui.traceback:
                    traceback.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            '''run a shell hook; hook args become HG_* env variables'''
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()] +
                       [(k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        # sorted so multiple hooks for one event run in a stable order
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
159 159
160 160 def tags(self):
161 161 '''return a mapping of tag to node'''
162 162 if not self.tagscache:
163 163 self.tagscache = {}
164 164
165 165 def parsetag(line, context):
166 166 if not line:
167 167 return
168 168 s = l.split(" ", 1)
169 169 if len(s) != 2:
170 170 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
171 171 return
172 172 node, key = s
173 173 try:
174 174 bin_n = bin(node)
175 175 except TypeError:
176 176 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
177 177 return
178 178 if bin_n not in self.changelog.nodemap:
179 179 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
180 180 return
181 181 self.tagscache[key.strip()] = bin_n
182 182
183 183 # read each head of the tags file, ending with the tip
184 184 # and add each tag found to the map, with "newer" ones
185 185 # taking precedence
186 186 fl = self.file(".hgtags")
187 187 h = fl.heads()
188 188 h.reverse()
189 189 for r in h:
190 190 count = 0
191 191 for l in fl.read(r).splitlines():
192 192 count += 1
193 193 parsetag(l, ".hgtags:%d" % count)
194 194
195 195 try:
196 196 f = self.opener("localtags")
197 197 count = 0
198 198 for l in f:
199 199 count += 1
200 200 parsetag(l, "localtags:%d" % count)
201 201 except IOError:
202 202 pass
203 203
204 204 self.tagscache['tip'] = self.changelog.tip()
205 205
206 206 return self.tagscache
207 207
208 208 def tagslist(self):
209 209 '''return a list of tags ordered by revision'''
210 210 l = []
211 211 for t, n in self.tags().items():
212 212 try:
213 213 r = self.changelog.rev(n)
214 214 except:
215 215 r = -2 # sort to the beginning of the list if unknown
216 216 l.append((r, t, n))
217 217 l.sort()
218 218 return [(t, n) for r, t, n in l]
219 219
220 220 def nodetags(self, node):
221 221 '''return the tags associated with a node'''
222 222 if not self.nodetagscache:
223 223 self.nodetagscache = {}
224 224 for t, n in self.tags().items():
225 225 self.nodetagscache.setdefault(n, []).append(t)
226 226 return self.nodetagscache.get(node, [])
227 227
    def lookup(self, key):
        """Resolve key (tag name first, then any changelog identifier)
        to a binary node; raise repo.RepoError if unknown."""
        try:
            return self.tags()[key]
        except KeyError:
            try:
                return self.changelog.lookup(key)
            except:
                # any changelog lookup failure means "unknown revision"
                raise repo.RepoError(_("unknown revision '%s'") % key)
236 236
237 237 def dev(self):
238 238 return os.stat(self.path).st_dev
239 239
240 240 def local(self):
241 241 return True
242 242
243 243 def join(self, f):
244 244 return os.path.join(self.path, f)
245 245
246 246 def wjoin(self, f):
247 247 return os.path.join(self.root, f)
248 248
249 249 def file(self, f):
250 250 if f[0] == '/':
251 251 f = f[1:]
252 252 return filelog.filelog(self.opener, f, self.revlogversion)
253 253
254 254 def getcwd(self):
255 255 return self.dirstate.getcwd()
256 256
257 257 def wfile(self, f, mode='r'):
258 258 return self.wopener(f, mode)
259 259
    def wread(self, filename):
        """Read filename from the working dir, applying [encode] filters."""
        if self.encodepats == None:
            # compile the [encode] patterns once and cache them
            l = []
            for pat, cmd in self.ui.configitems("encode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.encodepats = l

        data = self.wopener(filename, 'r').read()

        # only the first matching filter is applied
        for mf, cmd in self.encodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        return data
277 277
    def wwrite(self, filename, data, fd=None):
        """Write data to working-dir file filename after [decode] filters.

        If an open file object fd is given, write to it instead of
        opening filename.
        """
        if self.decodepats == None:
            # compile the [decode] patterns once and cache them
            l = []
            for pat, cmd in self.ui.configitems("decode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.decodepats = l

        # only the first matching filter is applied
        for mf, cmd in self.decodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        if fd:
            return fd.write(data)
        return self.wopener(filename, 'w').write(data)
295 295
    def transaction(self):
        """Return a transaction; nest into the running one if any."""
        tr = self.transhandle
        if tr != None and tr.running():
            return tr.nest()

        # save dirstate for undo
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        # aftertrans(...) supplies the after-close callback; it is
        # defined elsewhere in this module
        tr = transaction.transaction(self.ui.warn, self.opener,
                                     self.join("journal"),
                                     aftertrans(self.path))
        self.transhandle = tr
        return tr
313 313
    def recover(self):
        """Roll back an interrupted transaction if a journal exists.

        Returns True when a rollback was performed, False otherwise.
        """
        l = self.lock()
        if os.path.exists(self.join("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.opener, self.join("journal"))
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
324 324
    def undo(self, wlock=None):
        """Roll back the last completed transaction (the 'undo' files),
        restoring the saved dirstate as well."""
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.join("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.opener, self.join("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            # refresh both the store view and the dirstate view
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no undo information available\n"))
337 337
338 338 def wreload(self):
339 339 self.dirstate.read()
340 340
341 341 def reload(self):
342 342 self.changelog.load()
343 343 self.manifest.load()
344 344 self.tagscache = None
345 345 self.nodetagscache = None
346 346
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the named lock file inside .hg.

        With wait false, a held lock raises lock.LockHeld immediately;
        otherwise retry with a timeout ([ui] timeout, default 600s).
        acquirefn runs once the lock is held; releasefn is passed on
        to the lock object.
        """
        try:
            l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %s\n") %
                         (desc, inst.args[0]))
            # default to 600 seconds timeout
            l = lock.lock(self.join(lockname),
                          int(self.ui.config("ui", "timeout") or 600),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
363 363
364 364 def lock(self, wait=1):
365 365 return self.do_lock("lock", wait, acquirefn=self.reload,
366 366 desc=_('repository %s') % self.origroot)
367 367
368 368 def wlock(self, wait=1):
369 369 return self.do_lock("wlock", wait, self.dirstate.write,
370 370 self.wreload,
371 371 desc=_('working directory of %s') % self.origroot)
372 372
    def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
        """determine whether a new filenode is needed

        Returns (existing, None, None) when the file is unchanged from
        one parent, or (None, fp1, fp2) giving the parents a new
        filenode should record.
        """
        fp1 = manifest1.get(filename, nullid)
        fp2 = manifest2.get(filename, nullid)

        if fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = filelog.ancestor(fp1, fp2)
            if fpa == fp1:
                # fp1 is redundant; keep the other parent as fp1
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and text == filelog.read(fp1):
            return (fp1, None, None)

        return (None, fp1, fp2)
391 391
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        """Low-level commit of the given files with explicit parents.

        Bypasses the status checks done by commit(); the dirstate is
        only updated when p1 matches the current first parent.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        # only touch the dirstate when committing on top of the
        # working directory's parent
        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # unchanged from a parent: reuse the existing node
                    mm[f] = entry
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # unreadable in the working dir: treat as removed
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
447 447
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None):
        """Commit working-directory changes; return the new node or
        None when nothing was committed.

        With files, commit exactly those; otherwise commit whatever
        changes() reports under match.  Fires the precommit,
        pretxncommit and commit hooks.
        """
        commit = []
        remove = []
        changed = []

        if files:
            # explicit file list: classify by dirstate state
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            modified, added, removed, deleted, unknown = self.changes(match=match)
            commit = modified + added
            remove = removed

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        # an empty commit is only allowed with force or when closing
        # a merge (p2 set)
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status(_("nothing changed\n"))
            return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wread(f)
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

            r = self.file(f)

            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                # record copy source and its revision in file metadata
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
                fp1, fp2 = nullid, nullid
            else:
                entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # unchanged: reuse existing filenode
                    new[f] = entry
                    continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1 = m1.copy()
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text:
            # no message supplied: build a template and launch the editor
            edittext = [""]
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            edittext = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)
            if not edittext.rstrip():
                # empty message aborts the commit
                return None
            text = edittext

        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
567 567
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        """Yield (source, filename) pairs for files matching match.

        With node, walk that revision's manifest (source 'm'; 'b' for
        requested-but-missing files accepted by badmatch); without it,
        walk the dirstate.  The mutable default files=[] is read-only
        here.
        """
        if node:
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                # drop manifest files from the "requested" set as seen
                fdict.pop(fn, None)
                if match(fn):
                    yield 'm', fn
            for fn in fdict:
                # explicitly requested but absent from the manifest
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
585 585
    def changes(self, node1=None, node2=None, files=[], match=util.always,
                wlock=None, show_ignored=None):
        """return changes between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns (modified, added, removed, deleted, unknown) lists,
        plus ignored when show_ignored is set.
        """

        def fcmp(fn, mf):
            # compare working-dir contents with the stored revision
            t1 = self.wread(fn)
            t2 = self.file(fn).read(mf.get(fn, nullid))
            return cmp(t1, t2)

        def mfmatches(node):
            # manifest of node restricted to files accepted by match
            change = self.changelog.read(node)
            mf = dict(self.manifest.read(change[0]))
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if node1:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                # the dirstate fixups below need the wlock; degrade
                # gracefully if it cannot be taken
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            lookup, modified, added, removed, deleted, unknown, ignored = (
                self.dirstate.changes(files, match, show_ignored))

            # are we comparing working dir against its parent?
            if not node1:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        elif wlock is not None:
                            # contents unchanged: refresh the stat info
                            self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            deleted, unknown, ignored = [], [], []
            mf2 = mfmatches(node2)

        if node1:
            # flush lists from dirstate before comparing manifests
            modified, added = [], []

            for fn in mf2:
                if mf1.has_key(fn):
                    if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
                        modified.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever is left in mf1 exists only on the node1 side
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored:
            l.sort()
        if show_ignored is None:
            return (modified, added, removed, deleted, unknown)
        else:
            return (modified, added, removed, deleted, unknown, ignored)
668 668
    def add(self, list, wlock=None):
        """Schedule the given working-directory files for addition."""
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if not os.path.exists(p):
                self.ui.warn(_("%s does not exist!\n") % f)
            elif not os.path.isfile(p):
                # directories/symlinks etc. are rejected
                self.ui.warn(_("%s not added: only files supported currently\n")
                             % f)
            elif self.dirstate.state(f) in 'an':
                self.ui.warn(_("%s already tracked!\n") % f)
            else:
                self.dirstate.update([f], "a")
683 683
684 684 def forget(self, list, wlock=None):
685 685 if not wlock:
686 686 wlock = self.wlock()
687 687 for f in list:
688 688 if self.dirstate.state(f) not in 'ai':
689 689 self.ui.warn(_("%s not added!\n") % f)
690 690 else:
691 691 self.dirstate.forget([f])
692 692
    def remove(self, list, unlink=False, wlock=None):
        """Schedule files for removal; with unlink=True also delete
        them from the working directory first."""
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # an already-missing file is not an error
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                # never committed: dropping the pending add suffices
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")
713 713
    def undelete(self, list, wlock=None):
        """Restore files marked removed from the first dirstate parent."""
        p = self.dirstate.parents()[0]
        mn = self.changelog.read(p)[0]
        mf = self.manifest.readflags(mn)
        m = self.manifest.read(mn)
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in "r":
                self.ui.warn("%s not removed!\n" % f)
            else:
                # restore contents and exec bit, then mark as normal
                t = self.file(f).read(m[f])
                self.wwrite(f, t)
                util.set_exec(self.wjoin(f), mf[f])
                self.dirstate.update([f], "n")
729 729
    def copy(self, source, dest, wlock=None):
        """Record in the dirstate that dest is a copy of source.

        dest must already exist as a plain file in the working dir.
        """
        p = self.wjoin(dest)
        if not os.path.exists(p):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not os.path.isfile(p):
            self.ui.warn(_("copy failed: %s is not a file\n") % dest)
        else:
            if not wlock:
                wlock = self.wlock()
            if self.dirstate.state(dest) == '?':
                # untracked destination gets scheduled for add
                self.dirstate.update([dest], "a")
            self.dirstate.copy(source, dest)
742 742
743 743 def heads(self, start=None):
744 744 heads = self.changelog.heads(start)
745 745 # sort the output in rev descending order
746 746 heads = [(-self.changelog.rev(h), h) for h in heads]
747 747 heads.sort()
748 748 return [n for (r, n) in heads]
749 749
    # branchlookup returns a dict giving a list of branches for
    # each head. A branch is defined as the tag of a node or
    # the branch of the node's parents. If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph: a->b->c->d->e
    #                      \         /
    #                       aa -----/
    # a has tag 2.6.12
    # d has tag 2.6.13
    # e would have branch tags for 2.6.12 and 2.6.13.  Because the node
    # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
    # from the list.
    #
    # It is possible that more than one head will have the same branch tag.
    # callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (ie checkout of a specific
    # branch).
    #
    # passing in a specific branch will limit the depth of the search
    # through the parents.  It won't limit the branches returned in the
    # result though.
    def branchlookup(self, heads=None, branch=None):
        """Map each head to the list of branch tags visible from it
        (see the discussion above)."""
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head.  The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume traversal from a deferred second parent
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        # record this tag as visible from every node
                        # found so far on this walk
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        # reached the requested branch: stop this walk
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    # defer the second parent of a merge for later
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # transitive closure of tags reachable from node,
                # memoized in viscache
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited.  This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
855 855
    def branches(self, nodes):
        """For each node, follow first parents to the bottom of its
        linear segment; return (head, root, p1, p2) tuples."""
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            # walk first parents until a merge or the root is hit
            while n:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
869 869
    def between(self, pairs):
        """For each (top, bottom) pair, return the nodes on top's
        first-parent chain at exponentially growing distances
        (1, 2, 4, ...) — used for binary search over a branch."""
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    # record every node whose distance is a power of two
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
888 888
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return the roots of changesets the remote has and we lack.

        base (a dict, mutated in place) collects nodes known on both
        sides; heads limits the remote heads considered.  With force,
        unrelated repositories only warn instead of aborting.
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            # empty local repo: everything remote is incoming
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        rep = {}
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid:
                    break
                if n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                if n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            base[n[2]] = 1 # latest known
                            continue

                    # queue both parents for a batched branches() query
                    for a in n[2:4]:
                        if a not in rep:
                            r.append(a)
                            rep[a] = 1

                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                # query in chunks of 10 to bound request size
                for p in range(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        if b[0] in m:
                            self.ui.debug(_("found base node %s\n")
                                          % short(b[0]))
                            base[b[0]] = 1
                        elif b[0] not in seen:
                            unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        # interval is small enough: p is the first unknown
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.note(_("found new changesets starting at ") +
                     " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1018 1018
1019 1019 def findoutgoing(self, remote, base=None, heads=None, force=False):
1020 1020 """Return list of nodes that are roots of subsets not in remote
1021 1021
1022 1022 If base dict is specified, assume that these nodes and their parents
1023 1023 exist on the remote side.
1024 1024 If a list of heads is specified, return only nodes which are heads
1025 1025 or ancestors of these heads, and return a second element which
1026 1026 contains all remote heads which get new children.
1027 1027 """
1028 1028 if base == None:
1029 1029 base = {}
1030 1030 self.findincoming(remote, base, heads, force=force)
1031 1031
1032 1032 self.ui.debug(_("common changesets up to ")
1033 1033 + " ".join(map(short, base.keys())) + "\n")
1034 1034
1035 1035 remain = dict.fromkeys(self.changelog.nodemap)
1036 1036
1037 1037 # prune everything remote has from the tree
1038 1038 del remain[nullid]
1039 1039 remove = base.keys()
1040 1040 while remove:
1041 1041 n = remove.pop(0)
1042 1042 if n in remain:
1043 1043 del remain[n]
1044 1044 for p in self.changelog.parents(n):
1045 1045 remove.append(p)
1046 1046
1047 1047 # find every node whose parents have been pruned
1048 1048 subset = []
1049 1049 # find every remote head that will get new children
1050 1050 updated_heads = {}
1051 1051 for n in remain:
1052 1052 p1, p2 = self.changelog.parents(n)
1053 1053 if p1 not in remain and p2 not in remain:
1054 1054 subset.append(n)
1055 1055 if heads:
1056 1056 if p1 in heads:
1057 1057 updated_heads[p1] = True
1058 1058 if p2 in heads:
1059 1059 updated_heads[p2] = True
1060 1060
1061 1061 # this is the set of all roots we have to push
1062 1062 if heads:
1063 1063 return subset, updated_heads.keys()
1064 1064 else:
1065 1065 return subset
1066 1066
1067 1067 def pull(self, remote, heads=None, force=False):
1068 1068 l = self.lock()
1069 1069
1070 1070 fetch = self.findincoming(remote, force=force)
1071 1071 if fetch == [nullid]:
1072 1072 self.ui.status(_("requesting all changes\n"))
1073 1073
1074 1074 if not fetch:
1075 1075 self.ui.status(_("no changes found\n"))
1076 1076 return 0
1077 1077
1078 1078 if heads is None:
1079 1079 cg = remote.changegroup(fetch, 'pull')
1080 1080 else:
1081 1081 cg = remote.changegroupsubset(fetch, heads, 'pull')
1082 1082 return self.addchangegroup(cg)
1083 1083
1084 1084 def push(self, remote, force=False, revs=None):
1085 1085 lock = remote.lock()
1086 1086
1087 1087 base = {}
1088 1088 remote_heads = remote.heads()
1089 1089 inc = self.findincoming(remote, base, remote_heads, force=force)
1090 1090 if not force and inc:
1091 1091 self.ui.warn(_("abort: unsynced remote changes!\n"))
1092 1092 self.ui.status(_("(did you forget to sync?"
1093 1093 " use push -f to force)\n"))
1094 1094 return 1
1095 1095
1096 1096 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1097 1097 if revs is not None:
1098 1098 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1099 1099 else:
1100 1100 bases, heads = update, self.changelog.heads()
1101 1101
1102 1102 if not bases:
1103 1103 self.ui.status(_("no changes found\n"))
1104 1104 return 1
1105 1105 elif not force:
1106 1106 # FIXME we don't properly detect creation of new heads
1107 1107 # in the push -r case, assume the user knows what he's doing
1108 1108 if not revs and len(remote_heads) < len(heads) \
1109 1109 and remote_heads != [nullid]:
1110 1110 self.ui.warn(_("abort: push creates new remote branches!\n"))
1111 1111 self.ui.status(_("(did you forget to merge?"
1112 1112 " use push -f to force)\n"))
1113 1113 return 1
1114 1114
1115 1115 if revs is None:
1116 1116 cg = self.changegroup(update, 'push')
1117 1117 else:
1118 1118 cg = self.changegroupsubset(update, revs, 'push')
1119 1119 return remote.addchangegroup(cg)
1120 1120
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        bases: changenodes assumed known to the recipient; their descendants
               make up the changegroup
        heads: changenodes bounding the changegroup from above
        source: opaque tag forwarded to the 'preoutgoing'/'outgoing' hooks

        Fires the 'preoutgoing' hook (throwing on failure) before doing any
        work, and 'outgoing' once the group contents are determined.
        Returns a util.chunkbuffer streaming the changegroup chunks.
        """

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming the a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we now the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1391 1391
1392 1392 def changegroup(self, basenodes, source):
1393 1393 """Generate a changegroup of all nodes that we have that a recipient
1394 1394 doesn't.
1395 1395
1396 1396 This is much easier than the previous function as we can assume that
1397 1397 the recipient has any changenode we aren't sending them."""
1398 1398
1399 1399 self.hook('preoutgoing', throw=True, source=source)
1400 1400
1401 1401 cl = self.changelog
1402 1402 nodes = cl.nodesbetween(basenodes, None)[0]
1403 1403 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1404 1404
1405 1405 def identity(x):
1406 1406 return x
1407 1407
1408 1408 def gennodelst(revlog):
1409 1409 for r in xrange(0, revlog.count()):
1410 1410 n = revlog.node(r)
1411 1411 if revlog.linkrev(n) in revset:
1412 1412 yield n
1413 1413
1414 1414 def changed_file_collector(changedfileset):
1415 1415 def collect_changed_files(clnode):
1416 1416 c = cl.read(clnode)
1417 1417 for fname in c[3]:
1418 1418 changedfileset[fname] = 1
1419 1419 return collect_changed_files
1420 1420
1421 1421 def lookuprevlink_func(revlog):
1422 1422 def lookuprevlink(n):
1423 1423 return cl.node(revlog.linkrev(n))
1424 1424 return lookuprevlink
1425 1425
1426 1426 def gengroup():
1427 1427 # construct a list of all changed files
1428 1428 changedfiles = {}
1429 1429
1430 1430 for chnk in cl.group(nodes, identity,
1431 1431 changed_file_collector(changedfiles)):
1432 1432 yield chnk
1433 1433 changedfiles = changedfiles.keys()
1434 1434 changedfiles.sort()
1435 1435
1436 1436 mnfst = self.manifest
1437 1437 nodeiter = gennodelst(mnfst)
1438 1438 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1439 1439 yield chnk
1440 1440
1441 1441 for fname in changedfiles:
1442 1442 filerevlog = self.file(fname)
1443 1443 nodeiter = gennodelst(filerevlog)
1444 1444 nodeiter = list(nodeiter)
1445 1445 if nodeiter:
1446 1446 yield changegroup.genchunk(fname)
1447 1447 lookup = lookuprevlink_func(filerevlog)
1448 1448 for chnk in filerevlog.group(nodeiter, lookup):
1449 1449 yield chnk
1450 1450
1451 1451 yield changegroup.closechunk()
1452 1452
1453 1453 if nodes:
1454 1454 self.hook('outgoing', node=hex(nodes[0]), source=source)
1455 1455
1456 1456 return util.chunkbuffer(gengroup())
1457 1457
1458 1458 def addchangegroup(self, source):
1459 1459 """add changegroup to repo.
1460 1460 returns number of heads modified or added + 1."""
1461 1461
1462 1462 def csmap(x):
1463 1463 self.ui.debug(_("add changeset %s\n") % short(x))
1464 1464 return cl.count()
1465 1465
1466 1466 def revmap(x):
1467 1467 return cl.rev(x)
1468 1468
1469 1469 if not source:
1470 1470 return 0
1471 1471
1472 self.hook('prechangegroup', throw=True)
1472 self.hook('prechangegroup', throw=True, source=source)
1473 1473
1474 1474 changesets = files = revisions = 0
1475 1475
1476 1476 tr = self.transaction()
1477 1477
1478 1478 # write changelog and manifest data to temp files so
1479 1479 # concurrent readers will not see inconsistent view
1480 1480 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1481 1481
1482 1482 oldheads = len(cl.heads())
1483 1483
1484 1484 # pull off the changeset group
1485 1485 self.ui.status(_("adding changesets\n"))
1486 1486 co = cl.tip()
1487 1487 chunkiter = changegroup.chunkiter(source)
1488 1488 cn = cl.addgroup(chunkiter, csmap, tr, 1) # unique
1489 1489 cnr, cor = map(cl.rev, (cn, co))
1490 1490 if cn == nullid:
1491 1491 cnr = cor
1492 1492 changesets = cnr - cor
1493 1493
1494 1494 mf = appendfile.appendmanifest(self.opener, self.manifest.version)
1495 1495
1496 1496 # pull off the manifest group
1497 1497 self.ui.status(_("adding manifests\n"))
1498 1498 mm = mf.tip()
1499 1499 chunkiter = changegroup.chunkiter(source)
1500 1500 mo = mf.addgroup(chunkiter, revmap, tr)
1501 1501
1502 1502 # process the files
1503 1503 self.ui.status(_("adding file changes\n"))
1504 1504 while 1:
1505 1505 f = changegroup.getchunk(source)
1506 1506 if not f:
1507 1507 break
1508 1508 self.ui.debug(_("adding %s revisions\n") % f)
1509 1509 fl = self.file(f)
1510 1510 o = fl.count()
1511 1511 chunkiter = changegroup.chunkiter(source)
1512 1512 n = fl.addgroup(chunkiter, revmap, tr)
1513 1513 revisions += fl.count() - o
1514 1514 files += 1
1515 1515
1516 1516 # write order here is important so concurrent readers will see
1517 1517 # consistent view of repo
1518 1518 mf.writedata()
1519 1519 cl.writedata()
1520 1520
1521 1521 # make changelog and manifest see real files again
1522 1522 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1523 1523 self.manifest = manifest.manifest(self.opener, self.manifest.version)
1524 1524 self.changelog.checkinlinesize(tr)
1525 1525 self.manifest.checkinlinesize(tr)
1526 1526
1527 1527 newheads = len(self.changelog.heads())
1528 1528 heads = ""
1529 1529 if oldheads and newheads > oldheads:
1530 1530 heads = _(" (+%d heads)") % (newheads - oldheads)
1531 1531
1532 1532 self.ui.status(_("added %d changesets"
1533 1533 " with %d changes to %d files%s\n")
1534 1534 % (changesets, revisions, files, heads))
1535 1535
1536 1536 self.hook('pretxnchangegroup', throw=True,
1537 node=hex(self.changelog.node(cor+1)))
1537 node=hex(self.changelog.node(cor+1)), source=source)
1538 1538
1539 1539 tr.close()
1540 1540
1541 1541 if changesets > 0:
1542 self.hook("changegroup", node=hex(self.changelog.node(cor+1)))
1542 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1543 source=source)
1543 1544
1544 1545 for i in range(cor + 1, cnr + 1):
1545 self.hook("incoming", node=hex(self.changelog.node(i)))
1546 self.hook("incoming", node=hex(self.changelog.node(i)),
1547 source=source)
1546 1548
1547 1549 return newheads - oldheads + 1
1548 1550
    def update(self, node, allow=False, force=False, choose=None,
               moddirstate=True, forcemerge=False, wlock=None, show_stats=True):
        """Update the working directory to changeset `node`.

        node: target changenode
        allow: permit an update across branches (a working-dir merge)
        force: override safety checks and clobber local differences
        choose: optional predicate limiting which files are touched
        moddirstate: when False, leave the dirstate untouched
        forcemerge: skip the uncommitted-changes check before a merge
        wlock: pre-acquired working-dir lock (taken here when needed)
        show_stats: print the updated/merged/removed/unresolved summary

        Returns 1 when the update is aborted, otherwise False/True
        indicating whether any file merge failed.
        """
        pl = self.dirstate.parents()
        if not force and pl[1] != nullid:
            self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
            return 1

        err = False

        # read the manifests (and exec flags) of the local parent, the
        # target, and their common ancestor
        p1, p2 = pl[0], node
        pa = self.changelog.ancestor(p1, p2)
        m1n = self.changelog.read(p1)[0]
        m2n = self.changelog.read(p2)[0]
        man = self.manifest.ancestor(m1n, m2n)
        m1 = self.manifest.read(m1n)
        mf1 = self.manifest.readflags(m1n)
        m2 = self.manifest.read(m2n).copy()
        mf2 = self.manifest.readflags(m2n)
        ma = self.manifest.read(man)
        mfa = self.manifest.readflags(man)

        modified, added, removed, deleted, unknown = self.changes()

        # is this a jump, or a merge?  i.e. is there a linear path
        # from p1 to p2?
        linear_path = (pa == p1 or pa == p2)

        if allow and linear_path:
            raise util.Abort(_("there is nothing to merge, "
                               "just use 'hg update'"))
        if allow and not forcemerge:
            if modified or added or removed:
                raise util.Abort(_("outstanding uncommitted changes"))
        if not forcemerge and not force:
            for f in unknown:
                if f in m2:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) != 0:
                        raise util.Abort(_("'%s' already exists in the working"
                                           " dir and differs from remote") % f)

        # resolve the manifest to determine which files
        # we care about merging
        self.ui.note(_("resolving manifests\n"))
        self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
                      (force, allow, moddirstate, linear_path))
        self.ui.debug(_(" ancestor %s local %s remote %s\n") %
                      (short(man), short(m1n), short(m2n)))

        merge = {}
        get = {}
        remove = []

        # construct a working dir manifest
        mw = m1.copy()
        mfw = mf1.copy()
        umap = dict.fromkeys(unknown)

        for f in added + modified + unknown:
            mw[f] = ""
            mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

        if moddirstate and not wlock:
            wlock = self.wlock()

        for f in deleted + removed:
            if f in mw:
                del mw[f]

            # If we're jumping between revisions (as opposed to merging),
            # and if neither the working directory nor the target rev has
            # the file, then we need to remove it from the dirstate, to
            # prevent the dirstate from listing the file when it is no
            # longer in the manifest.
            if moddirstate and linear_path and f not in m2:
                self.dirstate.forget((f,))

        # Compare manifests
        for f, n in mw.iteritems():
            if choose and not choose(f):
                continue
            if f in m2:
                s = 0

                # is the wfile new since m1, and match m2?
                if f not in m1:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) == 0:
                        n = m2[f]
                    del t1, t2

                # are files different?
                if n != m2[f]:
                    a = ma.get(f, nullid)
                    # are both different from the ancestor?
                    if n != a and m2[f] != a:
                        self.ui.debug(_(" %s versions differ, resolve\n") % f)
                        # merge executable bits
                        # "if we changed or they changed, change in merge"
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        merge[f] = (m1.get(f, nullid), m2[f], mode)
                        s = 1
                    # are we clobbering?
                    # is remote's version newer?
                    # or are we going back in time?
                    elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                        self.ui.debug(_(" remote %s is newer, get\n") % f)
                        get[f] = m2[f]
                        s = 1
                elif f in umap or f in added:
                    # this unknown file is the same as the checkout
                    # we need to reset the dirstate if the file was added
                    get[f] = m2[f]

                if not s and mfw[f] != mf2[f]:
                    if force:
                        self.ui.debug(_(" updating permissions for %s\n") % f)
                        util.set_exec(self.wjoin(f), mf2[f])
                    else:
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        if mode != b:
                            self.ui.debug(_(" updating permissions for %s\n")
                                          % f)
                            util.set_exec(self.wjoin(f), mode)
                del m2[f]
            elif f in ma:
                if n != ma[f]:
                    r = _("d")
                    if not force and (linear_path or allow):
                        r = self.ui.prompt(
                            (_(" local changed %s which remote deleted\n") % f) +
                            _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                    if r == _("d"):
                        remove.append(f)
                else:
                    self.ui.debug(_("other deleted %s\n") % f)
                    remove.append(f) # other deleted it
            else:
                # file is created on branch or in working directory
                if force and f not in umap:
                    self.ui.debug(_("remote deleted %s, clobbering\n") % f)
                    remove.append(f)
                elif n == m1.get(f, nullid): # same as parent
                    if p2 == pa: # going backwards?
                        self.ui.debug(_("remote deleted %s\n") % f)
                        remove.append(f)
                    else:
                        self.ui.debug(_("local modified %s, keeping\n") % f)
                else:
                    self.ui.debug(_("working dir created %s, keeping\n") % f)

        for f, n in m2.iteritems():
            if choose and not choose(f):
                continue
            if f[0] == "/":
                continue
            if f in ma and n != ma[f]:
                r = _("k")
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        (_("remote changed %s which local deleted\n") % f) +
                        _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                if r == _("k"):
                    get[f] = n
            elif f not in ma:
                self.ui.debug(_("remote created %s\n") % f)
                get[f] = n
            else:
                if force or p2 == pa: # going backwards?
                    self.ui.debug(_("local deleted %s, recreating\n") % f)
                    get[f] = n
                else:
                    self.ui.debug(_("local deleted %s\n") % f)

        del mw, m1, m2, ma

        if force:
            for f in merge:
                get[f] = merge[f][1]
            merge = {}

        if linear_path or force:
            # we don't need to do any magic, just jump to the new rev
            branch_merge = False
            p1, p2 = p2, nullid
        else:
            if not allow:
                self.ui.status(_("this update spans a branch"
                                 " affecting the following files:\n"))
                fl = merge.keys() + get.keys()
                fl.sort()
                for f in fl:
                    cf = ""
                    if f in merge:
                        cf = _(" (resolve)")
                    self.ui.status(" %s%s\n" % (f, cf))
                self.ui.warn(_("aborting update spanning branches!\n"))
                self.ui.status(_("(use 'hg merge' to merge across branches"
                                 " or 'hg update -C' to lose changes)\n"))
                return 1
            branch_merge = True

        # get the files we don't need to change
        files = get.keys()
        files.sort()
        for f in files:
            if f[0] == "/":
                continue
            self.ui.note(_("getting %s\n") % f)
            t = self.file(f).read(get[f])
            self.wwrite(f, t)
            util.set_exec(self.wjoin(f), mf2[f])
            if moddirstate:
                if branch_merge:
                    self.dirstate.update([f], 'n', st_mtime=-1)
                else:
                    self.dirstate.update([f], 'n')

        # merge the tricky bits
        failedmerge = []
        files = merge.keys()
        files.sort()
        xp1 = hex(p1)
        xp2 = hex(p2)
        for f in files:
            self.ui.status(_("merging %s\n") % f)
            my, other, flag = merge[f]
            ret = self.merge3(f, my, other, xp1, xp2)
            if ret:
                err = True
                failedmerge.append(f)
            util.set_exec(self.wjoin(f), flag)
            if moddirstate:
                if branch_merge:
                    # We've done a branch merge, mark this file as merged
                    # so that we properly record the merger later
                    self.dirstate.update([f], 'm')
                else:
                    # We've update-merged a locally modified file, so
                    # we set the dirstate to emulate a normal checkout
                    # of that file some time in the past. Thus our
                    # merge will appear as a normal local file
                    # modification.
                    f_len = len(self.file(f).read(other))
                    self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)

        remove.sort()
        for f in remove:
            self.ui.note(_("removing %s\n") % f)
            util.audit_path(f)
            try:
                util.unlink(self.wjoin(f))
            except OSError, inst:
                if inst.errno != errno.ENOENT:
                    self.ui.warn(_("update failed to remove %s: %s!\n") %
                                 (f, inst.strerror))
        if moddirstate:
            if branch_merge:
                self.dirstate.update(remove, 'r')
            else:
                self.dirstate.forget(remove)

        if moddirstate:
            self.dirstate.setparents(p1, p2)

        if show_stats:
            stats = ((len(get), _("updated")),
                     (len(merge) - len(failedmerge), _("merged")),
                     (len(remove), _("removed")),
                     (len(failedmerge), _("unresolved")))
            note = ", ".join([_("%d files %s") % s for s in stats])
            self.ui.status("%s\n" % note)
        if moddirstate:
            if branch_merge:
                if failedmerge:
                    self.ui.status(_("There are unresolved merges,"
                                     " you can redo the full merge using:\n"
                                     "  hg update -C %s\n"
                                     "  hg merge %s\n"
                                     % (self.changelog.rev(p1),
                                        self.changelog.rev(p2))))
                else:
                    self.ui.status(_("(branch merge, don't forget to commit)\n"))
            elif failedmerge:
                self.ui.status(_("There are unresolved merges with"
                                 " locally modified files.\n"))

        return err
1841 1843
1842 1844 def merge3(self, fn, my, other, p1, p2):
1843 1845 """perform a 3-way merge in the working directory"""
1844 1846
1845 1847 def temp(prefix, node):
1846 1848 pre = "%s~%s." % (os.path.basename(fn), prefix)
1847 1849 (fd, name) = tempfile.mkstemp(prefix=pre)
1848 1850 f = os.fdopen(fd, "wb")
1849 1851 self.wwrite(fn, fl.read(node), f)
1850 1852 f.close()
1851 1853 return name
1852 1854
1853 1855 fl = self.file(fn)
1854 1856 base = fl.ancestor(my, other)
1855 1857 a = self.wjoin(fn)
1856 1858 b = temp("base", base)
1857 1859 c = temp("other", other)
1858 1860
1859 1861 self.ui.note(_("resolving %s\n") % fn)
1860 1862 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1861 1863 (fn, short(my), short(other), short(base)))
1862 1864
1863 1865 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1864 1866 or "hgmerge")
1865 1867 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1866 1868 environ={'HG_FILE': fn,
1867 1869 'HG_MY_NODE': p1,
1868 1870 'HG_OTHER_NODE': p2,
1869 1871 'HG_FILE_MY_NODE': hex(my),
1870 1872 'HG_FILE_OTHER_NODE': hex(other),
1871 1873 'HG_FILE_BASE_NODE': hex(base)})
1872 1874 if r:
1873 1875 self.ui.warn(_("merging %s failed!\n") % fn)
1874 1876
1875 1877 os.unlink(b)
1876 1878 os.unlink(c)
1877 1879 return r
1878 1880
1879 1881 def verify(self):
1880 1882 filelinkrevs = {}
1881 1883 filenodes = {}
1882 1884 changesets = revisions = files = 0
1883 1885 errors = [0]
1884 1886 warnings = [0]
1885 1887 neededmanifests = {}
1886 1888
1887 1889 def err(msg):
1888 1890 self.ui.warn(msg + "\n")
1889 1891 errors[0] += 1
1890 1892
1891 1893 def warn(msg):
1892 1894 self.ui.warn(msg + "\n")
1893 1895 warnings[0] += 1
1894 1896
1895 1897 def checksize(obj, name):
1896 1898 d = obj.checksize()
1897 1899 if d[0]:
1898 1900 err(_("%s data length off by %d bytes") % (name, d[0]))
1899 1901 if d[1]:
1900 1902 err(_("%s index contains %d extra bytes") % (name, d[1]))
1901 1903
1902 1904 def checkversion(obj, name):
1903 1905 if obj.version != revlog.REVLOGV0:
1904 1906 if not revlogv1:
1905 1907 warn(_("warning: `%s' uses revlog format 1") % name)
1906 1908 elif revlogv1:
1907 1909 warn(_("warning: `%s' uses revlog format 0") % name)
1908 1910
1909 1911 revlogv1 = self.revlogversion != revlog.REVLOGV0
1910 1912 if self.ui.verbose or revlogv1 != self.revlogv1:
1911 1913 self.ui.status(_("repository uses revlog format %d\n") %
1912 1914 (revlogv1 and 1 or 0))
1913 1915
1914 1916 seen = {}
1915 1917 self.ui.status(_("checking changesets\n"))
1916 1918 checksize(self.changelog, "changelog")
1917 1919
1918 1920 for i in range(self.changelog.count()):
1919 1921 changesets += 1
1920 1922 n = self.changelog.node(i)
1921 1923 l = self.changelog.linkrev(n)
1922 1924 if l != i:
1923 1925 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1924 1926 if n in seen:
1925 1927 err(_("duplicate changeset at revision %d") % i)
1926 1928 seen[n] = 1
1927 1929
1928 1930 for p in self.changelog.parents(n):
1929 1931 if p not in self.changelog.nodemap:
1930 1932 err(_("changeset %s has unknown parent %s") %
1931 1933 (short(n), short(p)))
1932 1934 try:
1933 1935 changes = self.changelog.read(n)
1934 1936 except KeyboardInterrupt:
1935 1937 self.ui.warn(_("interrupted"))
1936 1938 raise
1937 1939 except Exception, inst:
1938 1940 err(_("unpacking changeset %s: %s") % (short(n), inst))
1939 1941 continue
1940 1942
1941 1943 neededmanifests[changes[0]] = n
1942 1944
1943 1945 for f in changes[3]:
1944 1946 filelinkrevs.setdefault(f, []).append(i)
1945 1947
1946 1948 seen = {}
1947 1949 self.ui.status(_("checking manifests\n"))
1948 1950 checkversion(self.manifest, "manifest")
1949 1951 checksize(self.manifest, "manifest")
1950 1952
1951 1953 for i in range(self.manifest.count()):
1952 1954 n = self.manifest.node(i)
1953 1955 l = self.manifest.linkrev(n)
1954 1956
1955 1957 if l < 0 or l >= self.changelog.count():
1956 1958 err(_("bad manifest link (%d) at revision %d") % (l, i))
1957 1959
1958 1960 if n in neededmanifests:
1959 1961 del neededmanifests[n]
1960 1962
1961 1963 if n in seen:
1962 1964 err(_("duplicate manifest at revision %d") % i)
1963 1965
1964 1966 seen[n] = 1
1965 1967
1966 1968 for p in self.manifest.parents(n):
1967 1969 if p not in self.manifest.nodemap:
1968 1970 err(_("manifest %s has unknown parent %s") %
1969 1971 (short(n), short(p)))
1970 1972
1971 1973 try:
1972 1974 delta = mdiff.patchtext(self.manifest.delta(n))
1973 1975 except KeyboardInterrupt:
1974 1976 self.ui.warn(_("interrupted"))
1975 1977 raise
1976 1978 except Exception, inst:
1977 1979 err(_("unpacking manifest %s: %s") % (short(n), inst))
1978 1980 continue
1979 1981
1980 1982 try:
1981 1983 ff = [ l.split('\0') for l in delta.splitlines() ]
1982 1984 for f, fn in ff:
1983 1985 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1984 1986 except (ValueError, TypeError), inst:
1985 1987 err(_("broken delta in manifest %s: %s") % (short(n), inst))
1986 1988
1987 1989 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1988 1990
1989 1991 for m, c in neededmanifests.items():
1990 1992 err(_("Changeset %s refers to unknown manifest %s") %
1991 1993 (short(m), short(c)))
1992 1994 del neededmanifests
1993 1995
1994 1996 for f in filenodes:
1995 1997 if f not in filelinkrevs:
1996 1998 err(_("file %s in manifest but not in changesets") % f)
1997 1999
1998 2000 for f in filelinkrevs:
1999 2001 if f not in filenodes:
2000 2002 err(_("file %s in changeset but not in manifest") % f)
2001 2003
2002 2004 self.ui.status(_("checking files\n"))
2003 2005 ff = filenodes.keys()
2004 2006 ff.sort()
2005 2007 for f in ff:
2006 2008 if f == "/dev/null":
2007 2009 continue
2008 2010 files += 1
2009 2011 if not f:
2010 2012 err(_("file without name in manifest %s") % short(n))
2011 2013 continue
2012 2014 fl = self.file(f)
2013 2015 checkversion(fl, f)
2014 2016 checksize(fl, f)
2015 2017
2016 2018 nodes = {nullid: 1}
2017 2019 seen = {}
2018 2020 for i in range(fl.count()):
2019 2021 revisions += 1
2020 2022 n = fl.node(i)
2021 2023
2022 2024 if n in seen:
2023 2025 err(_("%s: duplicate revision %d") % (f, i))
2024 2026 if n not in filenodes[f]:
2025 2027 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
2026 2028 else:
2027 2029 del filenodes[f][n]
2028 2030
2029 2031 flr = fl.linkrev(n)
2030 2032 if flr not in filelinkrevs.get(f, []):
2031 2033 err(_("%s:%s points to unexpected changeset %d")
2032 2034 % (f, short(n), flr))
2033 2035 else:
2034 2036 filelinkrevs[f].remove(flr)
2035 2037
2036 2038 # verify contents
2037 2039 try:
2038 2040 t = fl.read(n)
2039 2041 except KeyboardInterrupt:
2040 2042 self.ui.warn(_("interrupted"))
2041 2043 raise
2042 2044 except Exception, inst:
2043 2045 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
2044 2046
2045 2047 # verify parents
2046 2048 (p1, p2) = fl.parents(n)
2047 2049 if p1 not in nodes:
2048 2050 err(_("file %s:%s unknown parent 1 %s") %
2049 2051 (f, short(n), short(p1)))
2050 2052 if p2 not in nodes:
2051 2053 err(_("file %s:%s unknown parent 2 %s") %
2052 2054 (f, short(n), short(p1)))
2053 2055 nodes[n] = 1
2054 2056
2055 2057 # cross-check
2056 2058 for node in filenodes[f]:
2057 2059 err(_("node %s in manifests not in %s") % (hex(node), f))
2058 2060
2059 2061 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
2060 2062 (files, changesets, revisions))
2061 2063
2062 2064 if warnings[0]:
2063 2065 self.ui.warn(_("%d warnings encountered!\n") % warnings[0])
2064 2066 if errors[0]:
2065 2067 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
2066 2068 return 1
2067 2069
2068 2070 # used to avoid circular references so destructors work
2069 2071 def aftertrans(base):
2070 2072 p = base
2071 2073 def a():
2072 2074 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
2073 2075 util.rename(os.path.join(p, "journal.dirstate"),
2074 2076 os.path.join(p, "undo.dirstate"))
2075 2077 return a
2076 2078
General Comments 0
You need to be logged in to leave comments. Login now