##// END OF EJS Templates
fix bug in localrepo.changes....
Vadim Gelfer -
r2478:287b7da4 default
parent child Browse files
Show More
@@ -1,2151 +1,2152
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 demandload(globals(), "appendfile changegroup")
12 12 demandload(globals(), "changelog dirstate filelog manifest repo")
13 13 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 14 demandload(globals(), "os revlog util")
15 15
16 16 class localrepository(object):
17 17 capabilities = ()
18 18
19 19 def __del__(self):
20 20 self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, initialize) the repository at path.

        If path is None, walk upward from the current directory until a
        directory containing ".hg" is found; raise repo.RepoError if the
        search reaches the filesystem root without finding one.
        """
        if not path:
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    # dirname() returned its own input: we hit the root
                    raise repo.RepoError(_("no repo found"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise repo.RepoError(_("repository %s not found") % path)

        self.root = os.path.abspath(path)
        self.origroot = path
        self.ui = ui.ui(parentui=parentui)
        # opener reads/writes under .hg, wopener under the working dir
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            # a repo without an hgrc is perfectly valid
            pass

        # revlog format/flags come from the [revlog] section of the config
        v = self.ui.revlogopts
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.opener, v)
        self.changelog = changelog.changelog(self.opener, v)

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily-populated caches; see tags()/nodetags()/wread()/wwrite()
        self.tagscache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
81 81
    def hook(self, name, throw=False, **args):
        """Run every configured hook whose name (before any '.suffix')
        matches `name`.

        Commands starting with "python:" are called in-process via
        callhook(); anything else runs through the shell via runhook().
        With throw=True a failing hook raises util.Abort; otherwise the
        aggregate failure status is returned.  Keyword args are passed to
        python hooks directly and exported to shell hooks as HG_* env vars.
        """
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                raise util.Abort(_('%s hook is invalid '
                                   '(import of "%s" failed)') %
                                 (hname, modname))
            try:
                # __import__ returns the top-level package; walk the
                # remaining dotted path down to the callable
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                # never swallow user interrupts
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0] % exc.args[1:]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # external hook: run through the shell with HG_* environment
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        # run hooks sorted by name so "name.a" fires before "name.b"
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
158 158
159 159 def tags(self):
160 160 '''return a mapping of tag to node'''
161 161 if not self.tagscache:
162 162 self.tagscache = {}
163 163
164 164 def parsetag(line, context):
165 165 if not line:
166 166 return
167 167 s = l.split(" ", 1)
168 168 if len(s) != 2:
169 169 self.ui.warn(_("%s: cannot parse entry\n") % context)
170 170 return
171 171 node, key = s
172 172 key = key.strip()
173 173 try:
174 174 bin_n = bin(node)
175 175 except TypeError:
176 176 self.ui.warn(_("%s: node '%s' is not well formed\n") %
177 177 (context, node))
178 178 return
179 179 if bin_n not in self.changelog.nodemap:
180 180 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
181 181 (context, key))
182 182 return
183 183 self.tagscache[key] = bin_n
184 184
185 185 # read the tags file from each head, ending with the tip,
186 186 # and add each tag found to the map, with "newer" ones
187 187 # taking precedence
188 188 heads = self.heads()
189 189 heads.reverse()
190 190 fl = self.file(".hgtags")
191 191 for node in heads:
192 192 change = self.changelog.read(node)
193 193 rev = self.changelog.rev(node)
194 194 fn, ff = self.manifest.find(change[0], '.hgtags')
195 195 if fn is None: continue
196 196 count = 0
197 197 for l in fl.read(fn).splitlines():
198 198 count += 1
199 199 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
200 200 (rev, short(node), count))
201 201 try:
202 202 f = self.opener("localtags")
203 203 count = 0
204 204 for l in f:
205 205 count += 1
206 206 parsetag(l, _("localtags, line %d") % count)
207 207 except IOError:
208 208 pass
209 209
210 210 self.tagscache['tip'] = self.changelog.tip()
211 211
212 212 return self.tagscache
213 213
214 214 def tagslist(self):
215 215 '''return a list of tags ordered by revision'''
216 216 l = []
217 217 for t, n in self.tags().items():
218 218 try:
219 219 r = self.changelog.rev(n)
220 220 except:
221 221 r = -2 # sort to the beginning of the list if unknown
222 222 l.append((r, t, n))
223 223 l.sort()
224 224 return [(t, n) for r, t, n in l]
225 225
226 226 def nodetags(self, node):
227 227 '''return the tags associated with a node'''
228 228 if not self.nodetagscache:
229 229 self.nodetagscache = {}
230 230 for t, n in self.tags().items():
231 231 self.nodetagscache.setdefault(n, []).append(t)
232 232 return self.nodetagscache.get(node, [])
233 233
234 234 def lookup(self, key):
235 235 try:
236 236 return self.tags()[key]
237 237 except KeyError:
238 238 try:
239 239 return self.changelog.lookup(key)
240 240 except:
241 241 raise repo.RepoError(_("unknown revision '%s'") % key)
242 242
243 243 def dev(self):
244 244 return os.lstat(self.path).st_dev
245 245
246 246 def local(self):
247 247 return True
248 248
249 249 def join(self, f):
250 250 return os.path.join(self.path, f)
251 251
252 252 def wjoin(self, f):
253 253 return os.path.join(self.root, f)
254 254
255 255 def file(self, f):
256 256 if f[0] == '/':
257 257 f = f[1:]
258 258 return filelog.filelog(self.opener, f, self.revlogversion)
259 259
260 260 def getcwd(self):
261 261 return self.dirstate.getcwd()
262 262
263 263 def wfile(self, f, mode='r'):
264 264 return self.wopener(f, mode)
265 265
266 266 def wread(self, filename):
267 267 if self.encodepats == None:
268 268 l = []
269 269 for pat, cmd in self.ui.configitems("encode"):
270 270 mf = util.matcher(self.root, "", [pat], [], [])[1]
271 271 l.append((mf, cmd))
272 272 self.encodepats = l
273 273
274 274 data = self.wopener(filename, 'r').read()
275 275
276 276 for mf, cmd in self.encodepats:
277 277 if mf(filename):
278 278 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
279 279 data = util.filter(data, cmd)
280 280 break
281 281
282 282 return data
283 283
284 284 def wwrite(self, filename, data, fd=None):
285 285 if self.decodepats == None:
286 286 l = []
287 287 for pat, cmd in self.ui.configitems("decode"):
288 288 mf = util.matcher(self.root, "", [pat], [], [])[1]
289 289 l.append((mf, cmd))
290 290 self.decodepats = l
291 291
292 292 for mf, cmd in self.decodepats:
293 293 if mf(filename):
294 294 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
295 295 data = util.filter(data, cmd)
296 296 break
297 297
298 298 if fd:
299 299 return fd.write(data)
300 300 return self.wopener(filename, 'w').write(data)
301 301
    def transaction(self):
        """Return a transaction on this repository, nesting into an
        already-running one if present.

        The current dirstate is snapshotted to journal.dirstate *before*
        the transaction starts so rollback can restore it.
        """
        tr = self.transhandle
        if tr != None and tr.running():
            # nest inside the active transaction instead of starting anew
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (e.g. fresh repo): save an empty snapshot
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        tr = transaction.transaction(self.ui.warn, self.opener,
                                     self.join("journal"),
                                     aftertrans(self.path))
        self.transhandle = tr
        return tr
319 319
320 320 def recover(self):
321 321 l = self.lock()
322 322 if os.path.exists(self.join("journal")):
323 323 self.ui.status(_("rolling back interrupted transaction\n"))
324 324 transaction.rollback(self.opener, self.join("journal"))
325 325 self.reload()
326 326 return True
327 327 else:
328 328 self.ui.warn(_("no interrupted transaction available\n"))
329 329 return False
330 330
331 331 def rollback(self, wlock=None):
332 332 if not wlock:
333 333 wlock = self.wlock()
334 334 l = self.lock()
335 335 if os.path.exists(self.join("undo")):
336 336 self.ui.status(_("rolling back last transaction\n"))
337 337 transaction.rollback(self.opener, self.join("undo"))
338 338 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
339 339 self.reload()
340 340 self.wreload()
341 341 else:
342 342 self.ui.warn(_("no rollback information available\n"))
343 343
344 344 def wreload(self):
345 345 self.dirstate.read()
346 346
347 347 def reload(self):
348 348 self.changelog.load()
349 349 self.manifest.load()
350 350 self.tagscache = None
351 351 self.nodetagscache = None
352 352
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the named lock file, returning the lock object.

        First tries without blocking; if the lock is held and wait is
        true, retries with a timeout (ui.timeout config, default 600s)
        after warning the user.  acquirefn, if given, is called once the
        lock is held.
        """
        try:
            l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %s\n") %
                         (desc, inst.args[0]))
            # default to 600 seconds timeout
            l = lock.lock(self.join(lockname),
                          int(self.ui.config("ui", "timeout") or 600),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
369 369
370 370 def lock(self, wait=1):
371 371 return self.do_lock("lock", wait, acquirefn=self.reload,
372 372 desc=_('repository %s') % self.origroot)
373 373
374 374 def wlock(self, wait=1):
375 375 return self.do_lock("wlock", wait, self.dirstate.write,
376 376 self.wreload,
377 377 desc=_('working directory of %s') % self.origroot)
378 378
379 379 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
380 380 "determine whether a new filenode is needed"
381 381 fp1 = manifest1.get(filename, nullid)
382 382 fp2 = manifest2.get(filename, nullid)
383 383
384 384 if fp2 != nullid:
385 385 # is one parent an ancestor of the other?
386 386 fpa = filelog.ancestor(fp1, fp2)
387 387 if fpa == fp1:
388 388 fp1, fp2 = fp2, nullid
389 389 elif fpa == fp2:
390 390 fp2 = nullid
391 391
392 392 # is the file unmodified from the parent? report existing entry
393 393 if fp2 == nullid and text == filelog.read(fp1):
394 394 return (fp1, None, None)
395 395
396 396 return (None, fp1, fp2)
397 397
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        """Commit the given files directly, bypassing the usual status
        checks (used e.g. by import-style operations).

        Parents default to the current dirstate parents; the dirstate is
        only updated when p1 is still the original first parent.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        # only touch the dirstate if we are committing onto its parent
        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # file is unchanged: reuse the existing filenode
                    mm[f] = entry
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # file missing from the working dir: record it as removed
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
453 453
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False):
        """Commit changes in the working directory.

        With a files list, only those files are committed (they must be
        tracked); otherwise everything reported changed by changes() is.
        Returns the new changelog node, or None if there was nothing to
        commit or the commit message came back empty.
        """
        commit = []
        remove = []
        changed = []

        if files:
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            modified, added, removed, deleted, unknown = self.changes(match=match)
            commit = modified + added
            remove = removed

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        # a merge (p2 set) is always committed, even with no file changes
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status(_("nothing changed\n"))
            return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wread(f)
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

            r = self.file(f)

            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                # record copy source and its revision in file metadata
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
                fp1, fp2 = nullid, nullid
            else:
                entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # unchanged file: reuse the existing filenode
                    new[f] = entry
                    continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1 = m1.copy()
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # strip trailing whitespace and leading blank lines; an empty
        # message aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
580 580
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        """Yield (source, filename) pairs for matching files.

        With a node, walk that revision's manifest (source 'm'); files
        requested but absent from the manifest are warned about, or
        yielded with source 'b' when badmatch accepts them.  Without a
        node, delegate to the dirstate walk over the working directory.
        """
        if node:
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                # pop so that only files *not* in the manifest remain
                fdict.pop(fn, None)
                if match(fn):
                    yield 'm', fn
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
598 598
    def changes(self, node1=None, node2=None, files=[], match=util.always,
                wlock=None, show_ignored=None):
        """return changes between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns (modified, added, removed, deleted, unknown), each
        sorted, plus a trailing ignored list when show_ignored is set.
        """

        def fcmp(fn, mf):
            # full content compare between working file and stored file
            t1 = self.wread(fn)
            t2 = self.file(fn).read(mf.get(fn, nullid))
            return cmp(t1, t2)

        def mfmatches(node):
            # manifest of `node` restricted to files accepted by `match`
            change = self.changelog.read(node)
            mf = dict(self.manifest.read(change[0]))
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        # initialize all result lists so every code path below has them
        modified, added, removed, deleted, unknown, ignored = [],[],[],[],[],[]
        compareworking = False
        if not node1 or node1 == self.dirstate.parents()[0]:
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                # a wlock lets us write fixed-up stat info back to the
                # dirstate; proceed without one if it is unavailable
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            lookup, modified, added, removed, deleted, unknown, ignored = (
                self.dirstate.changes(files, match, show_ignored))

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        elif wlock is not None:
                            self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            deleted, unknown, ignored = [], [], []
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added = [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    # the "" sentinel marks a working-dir file whose
                    # content must be compared byte-for-byte
                    if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
                        modified.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever is left in mf1 exists only on the node1 side
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored:
            l.sort()
        if show_ignored is None:
            return (modified, added, removed, deleted, unknown)
        else:
            return (modified, added, removed, deleted, unknown, ignored)
689 690
690 691 def add(self, list, wlock=None):
691 692 if not wlock:
692 693 wlock = self.wlock()
693 694 for f in list:
694 695 p = self.wjoin(f)
695 696 if not os.path.exists(p):
696 697 self.ui.warn(_("%s does not exist!\n") % f)
697 698 elif not os.path.isfile(p):
698 699 self.ui.warn(_("%s not added: only files supported currently\n")
699 700 % f)
700 701 elif self.dirstate.state(f) in 'an':
701 702 self.ui.warn(_("%s already tracked!\n") % f)
702 703 else:
703 704 self.dirstate.update([f], "a")
704 705
705 706 def forget(self, list, wlock=None):
706 707 if not wlock:
707 708 wlock = self.wlock()
708 709 for f in list:
709 710 if self.dirstate.state(f) not in 'ai':
710 711 self.ui.warn(_("%s not added!\n") % f)
711 712 else:
712 713 self.dirstate.forget([f])
713 714
    def remove(self, list, unlink=False, wlock=None):
        """Schedule the given files for removal at the next commit.

        With unlink=True the working copies are deleted first; a file
        that still exists afterwards is refused with a warning.
        """
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # already gone is fine; anything else is a real error
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                # never committed: dropping the pending add is enough
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")
734 735
735 736 def undelete(self, list, wlock=None):
736 737 p = self.dirstate.parents()[0]
737 738 mn = self.changelog.read(p)[0]
738 739 mf = self.manifest.readflags(mn)
739 740 m = self.manifest.read(mn)
740 741 if not wlock:
741 742 wlock = self.wlock()
742 743 for f in list:
743 744 if self.dirstate.state(f) not in "r":
744 745 self.ui.warn("%s not removed!\n" % f)
745 746 else:
746 747 t = self.file(f).read(m[f])
747 748 self.wwrite(f, t)
748 749 util.set_exec(self.wjoin(f), mf[f])
749 750 self.dirstate.update([f], "n")
750 751
751 752 def copy(self, source, dest, wlock=None):
752 753 p = self.wjoin(dest)
753 754 if not os.path.exists(p):
754 755 self.ui.warn(_("%s does not exist!\n") % dest)
755 756 elif not os.path.isfile(p):
756 757 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
757 758 else:
758 759 if not wlock:
759 760 wlock = self.wlock()
760 761 if self.dirstate.state(dest) == '?':
761 762 self.dirstate.update([dest], "a")
762 763 self.dirstate.copy(source, dest)
763 764
764 765 def heads(self, start=None):
765 766 heads = self.changelog.heads(start)
766 767 # sort the output in rev descending order
767 768 heads = [(-self.changelog.rev(h), h) for h in heads]
768 769 heads.sort()
769 770 return [n for (r, n) in heads]
770 771
771 772 # branchlookup returns a dict giving a list of branches for
772 773 # each head. A branch is defined as the tag of a node or
773 774 # the branch of the node's parents. If a node has multiple
774 775 # branch tags, tags are eliminated if they are visible from other
775 776 # branch tags.
776 777 #
777 778 # So, for this graph: a->b->c->d->e
778 779 # \ /
779 780 # aa -----/
780 781 # a has tag 2.6.12
781 782 # d has tag 2.6.13
782 783 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
783 784 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
784 785 # from the list.
785 786 #
786 787 # It is possible that more than one head will have the same branch tag.
787 788 # callers need to check the result for multiple heads under the same
788 789 # branch tag if that is a problem for them (ie checkout of a specific
789 790 # branch).
790 791 #
791 792 # passing in a specific branch will limit the depth of the search
792 793 # through the parents. It won't limit the branches returned in the
793 794 # result though.
    def branchlookup(self, heads=None, branch=None):
        """Return a dict mapping each head to the list of branch tags
        visible from it (see the commentary above this method)."""
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume a queued merge branch, keeping the tags already
                # found on the path that reached it
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        # record this tagged node as visible from every
                        # node collected so far on this walk
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        # reached the requested branch: stop descending
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    # queue the second parent of a merge for a later pass
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # transitive closure of tags reachable from `node`,
                # memoized in viscache
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
876 877
877 878 def branches(self, nodes):
878 879 if not nodes:
879 880 nodes = [self.changelog.tip()]
880 881 b = []
881 882 for n in nodes:
882 883 t = n
883 884 while 1:
884 885 p = self.changelog.parents(n)
885 886 if p[1] != nullid or p[0] == nullid:
886 887 b.append((t, n, p[0], p[1]))
887 888 break
888 889 n = p[0]
889 890 return b
890 891
891 892 def between(self, pairs):
892 893 r = []
893 894
894 895 for top, bottom in pairs:
895 896 n, l, i = top, [], 0
896 897 f = 1
897 898
898 899 while n != bottom:
899 900 p = self.changelog.parents(n)[0]
900 901 if i == f:
901 902 l.append(n)
902 903 f = f * 2
903 904 n = p
904 905 i += 1
905 906
906 907 r.append(l)
907 908
908 909 return r
909 910
910 911 def findincoming(self, remote, base=None, heads=None, force=False):
911 912 """Return list of roots of the subsets of missing nodes from remote
912 913
913 914 If base dict is specified, assume that these nodes and their parents
914 915 exist on the remote side and that no child of a node of base exists
915 916 in both remote and self.
916 917 Furthermore base will be updated to include the nodes that exists
917 918 in self and remote but no children exists in self and remote.
918 919 If a list of heads is specified, return only nodes which are heads
919 920 or ancestors of these heads.
920 921
921 922 All the ancestors of base are in self and in remote.
922 923 All the descendants of the list returned are missing in self.
923 924 (and so we know that the rest of the nodes are missing in remote, see
924 925 outgoing)
925 926 """
926 927 m = self.changelog.nodemap
927 928 search = []
928 929 fetch = {}
929 930 seen = {}
930 931 seenbranch = {}
931 932 if base == None:
932 933 base = {}
933 934
934 935 if not heads:
935 936 heads = remote.heads()
936 937
937 938 if self.changelog.tip() == nullid:
938 939 base[nullid] = 1
939 940 if heads != [nullid]:
940 941 return [nullid]
941 942 return []
942 943
943 944 # assume we're closer to the tip than the root
944 945 # and start by examining the heads
945 946 self.ui.status(_("searching for changes\n"))
946 947
947 948 unknown = []
948 949 for h in heads:
949 950 if h not in m:
950 951 unknown.append(h)
951 952 else:
952 953 base[h] = 1
953 954
954 955 if not unknown:
955 956 return []
956 957
957 958 req = dict.fromkeys(unknown)
958 959 reqcnt = 0
959 960
960 961 # search through remote branches
961 962 # a 'branch' here is a linear segment of history, with four parts:
962 963 # head, root, first parent, second parent
963 964 # (a branch always has two parents (or none) by definition)
964 965 unknown = remote.branches(unknown)
965 966 while unknown:
966 967 r = []
967 968 while unknown:
968 969 n = unknown.pop(0)
969 970 if n[0] in seen:
970 971 continue
971 972
972 973 self.ui.debug(_("examining %s:%s\n")
973 974 % (short(n[0]), short(n[1])))
974 975 if n[0] == nullid: # found the end of the branch
975 976 pass
976 977 elif n in seenbranch:
977 978 self.ui.debug(_("branch already found\n"))
978 979 continue
979 980 elif n[1] and n[1] in m: # do we know the base?
980 981 self.ui.debug(_("found incomplete branch %s:%s\n")
981 982 % (short(n[0]), short(n[1])))
982 983 search.append(n) # schedule branch range for scanning
983 984 seenbranch[n] = 1
984 985 else:
985 986 if n[1] not in seen and n[1] not in fetch:
986 987 if n[2] in m and n[3] in m:
987 988 self.ui.debug(_("found new changeset %s\n") %
988 989 short(n[1]))
989 990 fetch[n[1]] = 1 # earliest unknown
990 991 for p in n[2:4]:
991 992 if p in m:
992 993 base[p] = 1 # latest known
993 994
994 995 for p in n[2:4]:
995 996 if p not in req and p not in m:
996 997 r.append(p)
997 998 req[p] = 1
998 999 seen[n[0]] = 1
999 1000
1000 1001 if r:
1001 1002 reqcnt += 1
1002 1003 self.ui.debug(_("request %d: %s\n") %
1003 1004 (reqcnt, " ".join(map(short, r))))
1004 1005 for p in range(0, len(r), 10):
1005 1006 for b in remote.branches(r[p:p+10]):
1006 1007 self.ui.debug(_("received %s:%s\n") %
1007 1008 (short(b[0]), short(b[1])))
1008 1009 unknown.append(b)
1009 1010
1010 1011 # do binary search on the branches we found
1011 1012 while search:
1012 1013 n = search.pop(0)
1013 1014 reqcnt += 1
1014 1015 l = remote.between([(n[0], n[1])])[0]
1015 1016 l.append(n[1])
1016 1017 p = n[0]
1017 1018 f = 1
1018 1019 for i in l:
1019 1020 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1020 1021 if i in m:
1021 1022 if f <= 2:
1022 1023 self.ui.debug(_("found new branch changeset %s\n") %
1023 1024 short(p))
1024 1025 fetch[p] = 1
1025 1026 base[i] = 1
1026 1027 else:
1027 1028 self.ui.debug(_("narrowed branch search to %s:%s\n")
1028 1029 % (short(p), short(i)))
1029 1030 search.append((p, i))
1030 1031 break
1031 1032 p, f = i, f * 2
1032 1033
1033 1034 # sanity check our fetch list
1034 1035 for f in fetch.keys():
1035 1036 if f in m:
1036 1037 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1037 1038
1038 1039 if base.keys() == [nullid]:
1039 1040 if force:
1040 1041 self.ui.warn(_("warning: repository is unrelated\n"))
1041 1042 else:
1042 1043 raise util.Abort(_("repository is unrelated"))
1043 1044
1044 1045 self.ui.note(_("found new changesets starting at ") +
1045 1046 " ".join([short(f) for f in fetch]) + "\n")
1046 1047
1047 1048 self.ui.debug(_("%d total queries\n") % reqcnt)
1048 1049
1049 1050 return fetch.keys()
1050 1051
1051 1052 def findoutgoing(self, remote, base=None, heads=None, force=False):
1052 1053 """Return list of nodes that are roots of subsets not in remote
1053 1054
1054 1055 If base dict is specified, assume that these nodes and their parents
1055 1056 exist on the remote side.
1056 1057 If a list of heads is specified, return only nodes which are heads
1057 1058 or ancestors of these heads, and return a second element which
1058 1059 contains all remote heads which get new children.
1059 1060 """
1060 1061 if base == None:
1061 1062 base = {}
1062 1063 self.findincoming(remote, base, heads, force=force)
1063 1064
1064 1065 self.ui.debug(_("common changesets up to ")
1065 1066 + " ".join(map(short, base.keys())) + "\n")
1066 1067
1067 1068 remain = dict.fromkeys(self.changelog.nodemap)
1068 1069
1069 1070 # prune everything remote has from the tree
1070 1071 del remain[nullid]
1071 1072 remove = base.keys()
1072 1073 while remove:
1073 1074 n = remove.pop(0)
1074 1075 if n in remain:
1075 1076 del remain[n]
1076 1077 for p in self.changelog.parents(n):
1077 1078 remove.append(p)
1078 1079
1079 1080 # find every node whose parents have been pruned
1080 1081 subset = []
1081 1082 # find every remote head that will get new children
1082 1083 updated_heads = {}
1083 1084 for n in remain:
1084 1085 p1, p2 = self.changelog.parents(n)
1085 1086 if p1 not in remain and p2 not in remain:
1086 1087 subset.append(n)
1087 1088 if heads:
1088 1089 if p1 in heads:
1089 1090 updated_heads[p1] = True
1090 1091 if p2 in heads:
1091 1092 updated_heads[p2] = True
1092 1093
1093 1094 # this is the set of all roots we have to push
1094 1095 if heads:
1095 1096 return subset, updated_heads.keys()
1096 1097 else:
1097 1098 return subset
1098 1099
1099 1100 def pull(self, remote, heads=None, force=False):
1100 1101 l = self.lock()
1101 1102
1102 1103 fetch = self.findincoming(remote, force=force)
1103 1104 if fetch == [nullid]:
1104 1105 self.ui.status(_("requesting all changes\n"))
1105 1106
1106 1107 if not fetch:
1107 1108 self.ui.status(_("no changes found\n"))
1108 1109 return 0
1109 1110
1110 1111 if heads is None:
1111 1112 cg = remote.changegroup(fetch, 'pull')
1112 1113 else:
1113 1114 cg = remote.changegroupsubset(fetch, heads, 'pull')
1114 1115 return self.addchangegroup(cg, 'pull')
1115 1116
1116 1117 def push(self, remote, force=False, revs=None):
1117 1118 # there are two ways to push to remote repo:
1118 1119 #
1119 1120 # addchangegroup assumes local user can lock remote
1120 1121 # repo (local filesystem, old ssh servers).
1121 1122 #
1122 1123 # unbundle assumes local user cannot lock remote repo (new ssh
1123 1124 # servers, http servers).
1124 1125
1125 1126 if 'unbundle' in remote.capabilities:
1126 1127 return self.push_unbundle(remote, force, revs)
1127 1128 return self.push_addchangegroup(remote, force, revs)
1128 1129
1129 1130 def prepush(self, remote, force, revs):
1130 1131 base = {}
1131 1132 remote_heads = remote.heads()
1132 1133 inc = self.findincoming(remote, base, remote_heads, force=force)
1133 1134 if not force and inc:
1134 1135 self.ui.warn(_("abort: unsynced remote changes!\n"))
1135 1136 self.ui.status(_("(did you forget to sync?"
1136 1137 " use push -f to force)\n"))
1137 1138 return None, 1
1138 1139
1139 1140 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1140 1141 if revs is not None:
1141 1142 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1142 1143 else:
1143 1144 bases, heads = update, self.changelog.heads()
1144 1145
1145 1146 if not bases:
1146 1147 self.ui.status(_("no changes found\n"))
1147 1148 return None, 1
1148 1149 elif not force:
1149 1150 # FIXME we don't properly detect creation of new heads
1150 1151 # in the push -r case, assume the user knows what he's doing
1151 1152 if not revs and len(remote_heads) < len(heads) \
1152 1153 and remote_heads != [nullid]:
1153 1154 self.ui.warn(_("abort: push creates new remote branches!\n"))
1154 1155 self.ui.status(_("(did you forget to merge?"
1155 1156 " use push -f to force)\n"))
1156 1157 return None, 1
1157 1158
1158 1159 if revs is None:
1159 1160 cg = self.changegroup(update, 'push')
1160 1161 else:
1161 1162 cg = self.changegroupsubset(update, revs, 'push')
1162 1163 return cg, remote_heads
1163 1164
1164 1165 def push_addchangegroup(self, remote, force, revs):
1165 1166 lock = remote.lock()
1166 1167
1167 1168 ret = self.prepush(remote, force, revs)
1168 1169 if ret[0] is not None:
1169 1170 cg, remote_heads = ret
1170 1171 return remote.addchangegroup(cg, 'push')
1171 1172 return ret[1]
1172 1173
1173 1174 def push_unbundle(self, remote, force, revs):
1174 1175 # local repo finds heads on server, finds out what revs it
1175 1176 # must push. once revs transferred, if server finds it has
1176 1177 # different heads (someone else won commit/push race), server
1177 1178 # aborts.
1178 1179
1179 1180 ret = self.prepush(remote, force, revs)
1180 1181 if ret[0] is not None:
1181 1182 cg, remote_heads = ret
1182 1183 if force: remote_heads = ['force']
1183 1184 return remote.unbundle(cg, remote_heads, 'push')
1184 1185 return ret[1]
1185 1186
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        bases:  changenodes assumed to already exist on the recipient side
        heads:  changenodes the generated group should reach up to
        source: tag passed through to the preoutgoing/outgoing hooks

        Returns a util.chunkbuffer wrapping the chunk generator; chunks
        are emitted in changelog, manifest, per-file order with a final
        close chunk.
        """

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming the a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we now the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1456 1457
1457 1458 def changegroup(self, basenodes, source):
1458 1459 """Generate a changegroup of all nodes that we have that a recipient
1459 1460 doesn't.
1460 1461
1461 1462 This is much easier than the previous function as we can assume that
1462 1463 the recipient has any changenode we aren't sending them."""
1463 1464
1464 1465 self.hook('preoutgoing', throw=True, source=source)
1465 1466
1466 1467 cl = self.changelog
1467 1468 nodes = cl.nodesbetween(basenodes, None)[0]
1468 1469 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1469 1470
1470 1471 def identity(x):
1471 1472 return x
1472 1473
1473 1474 def gennodelst(revlog):
1474 1475 for r in xrange(0, revlog.count()):
1475 1476 n = revlog.node(r)
1476 1477 if revlog.linkrev(n) in revset:
1477 1478 yield n
1478 1479
1479 1480 def changed_file_collector(changedfileset):
1480 1481 def collect_changed_files(clnode):
1481 1482 c = cl.read(clnode)
1482 1483 for fname in c[3]:
1483 1484 changedfileset[fname] = 1
1484 1485 return collect_changed_files
1485 1486
1486 1487 def lookuprevlink_func(revlog):
1487 1488 def lookuprevlink(n):
1488 1489 return cl.node(revlog.linkrev(n))
1489 1490 return lookuprevlink
1490 1491
1491 1492 def gengroup():
1492 1493 # construct a list of all changed files
1493 1494 changedfiles = {}
1494 1495
1495 1496 for chnk in cl.group(nodes, identity,
1496 1497 changed_file_collector(changedfiles)):
1497 1498 yield chnk
1498 1499 changedfiles = changedfiles.keys()
1499 1500 changedfiles.sort()
1500 1501
1501 1502 mnfst = self.manifest
1502 1503 nodeiter = gennodelst(mnfst)
1503 1504 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1504 1505 yield chnk
1505 1506
1506 1507 for fname in changedfiles:
1507 1508 filerevlog = self.file(fname)
1508 1509 nodeiter = gennodelst(filerevlog)
1509 1510 nodeiter = list(nodeiter)
1510 1511 if nodeiter:
1511 1512 yield changegroup.genchunk(fname)
1512 1513 lookup = lookuprevlink_func(filerevlog)
1513 1514 for chnk in filerevlog.group(nodeiter, lookup):
1514 1515 yield chnk
1515 1516
1516 1517 yield changegroup.closechunk()
1517 1518
1518 1519 if nodes:
1519 1520 self.hook('outgoing', node=hex(nodes[0]), source=source)
1520 1521
1521 1522 return util.chunkbuffer(gengroup())
1522 1523
    def addchangegroup(self, source, srctype):
        """add changegroup to repo.
        returns number of heads modified or added + 1.

        source:  stream of changegroup chunks (or a false value, in
                 which case nothing is done and 0 is returned)
        srctype: tag passed through to the changegroup-related hooks
        """

        # link function for the changelog: each incoming changeset links
        # to its own (new) revision number
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        # link function for manifests and filelogs: map a changenode to
        # its revision number in the (updated) changelog
        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.opener, self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            # cor/cnr: changelog rev counts before/after the addition,
            # used below to enumerate the incoming changesets
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # each file group is preceded by a chunk with the file
                # name; an empty chunk terminates the stream
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # publish the appended changelog data atomically
            cl.writedata()
        finally:
            # always remove the appendfile temp files, even on error
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.opener, self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # pretxnchangegroup may still veto the whole transaction
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype)

        tr.close()

        if changesets > 0:
            # post-transaction notification hooks: one changegroup hook
            # for the whole group, one incoming hook per changeset
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype)

            for i in range(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype)

        return newheads - oldheads + 1
1617 1618
1618 1619 def update(self, node, allow=False, force=False, choose=None,
1619 1620 moddirstate=True, forcemerge=False, wlock=None, show_stats=True):
1620 1621 pl = self.dirstate.parents()
1621 1622 if not force and pl[1] != nullid:
1622 1623 raise util.Abort(_("outstanding uncommitted merges"))
1623 1624
1624 1625 err = False
1625 1626
1626 1627 p1, p2 = pl[0], node
1627 1628 pa = self.changelog.ancestor(p1, p2)
1628 1629 m1n = self.changelog.read(p1)[0]
1629 1630 m2n = self.changelog.read(p2)[0]
1630 1631 man = self.manifest.ancestor(m1n, m2n)
1631 1632 m1 = self.manifest.read(m1n)
1632 1633 mf1 = self.manifest.readflags(m1n)
1633 1634 m2 = self.manifest.read(m2n).copy()
1634 1635 mf2 = self.manifest.readflags(m2n)
1635 1636 ma = self.manifest.read(man)
1636 1637 mfa = self.manifest.readflags(man)
1637 1638
1638 1639 modified, added, removed, deleted, unknown = self.changes()
1639 1640
1640 1641 # is this a jump, or a merge? i.e. is there a linear path
1641 1642 # from p1 to p2?
1642 1643 linear_path = (pa == p1 or pa == p2)
1643 1644
1644 1645 if allow and linear_path:
1645 1646 raise util.Abort(_("there is nothing to merge, "
1646 1647 "just use 'hg update'"))
1647 1648 if allow and not forcemerge:
1648 1649 if modified or added or removed:
1649 1650 raise util.Abort(_("outstanding uncommitted changes"))
1650 1651
1651 1652 if not forcemerge and not force:
1652 1653 for f in unknown:
1653 1654 if f in m2:
1654 1655 t1 = self.wread(f)
1655 1656 t2 = self.file(f).read(m2[f])
1656 1657 if cmp(t1, t2) != 0:
1657 1658 raise util.Abort(_("'%s' already exists in the working"
1658 1659 " dir and differs from remote") % f)
1659 1660
1660 1661 # resolve the manifest to determine which files
1661 1662 # we care about merging
1662 1663 self.ui.note(_("resolving manifests\n"))
1663 1664 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1664 1665 (force, allow, moddirstate, linear_path))
1665 1666 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1666 1667 (short(man), short(m1n), short(m2n)))
1667 1668
1668 1669 merge = {}
1669 1670 get = {}
1670 1671 remove = []
1671 1672
1672 1673 # construct a working dir manifest
1673 1674 mw = m1.copy()
1674 1675 mfw = mf1.copy()
1675 1676 umap = dict.fromkeys(unknown)
1676 1677
1677 1678 for f in added + modified + unknown:
1678 1679 mw[f] = ""
1679 1680 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1680 1681
1681 1682 if moddirstate and not wlock:
1682 1683 wlock = self.wlock()
1683 1684
1684 1685 for f in deleted + removed:
1685 1686 if f in mw:
1686 1687 del mw[f]
1687 1688
1688 1689 # If we're jumping between revisions (as opposed to merging),
1689 1690 # and if neither the working directory nor the target rev has
1690 1691 # the file, then we need to remove it from the dirstate, to
1691 1692 # prevent the dirstate from listing the file when it is no
1692 1693 # longer in the manifest.
1693 1694 if moddirstate and linear_path and f not in m2:
1694 1695 self.dirstate.forget((f,))
1695 1696
1696 1697 # Compare manifests
1697 1698 for f, n in mw.iteritems():
1698 1699 if choose and not choose(f):
1699 1700 continue
1700 1701 if f in m2:
1701 1702 s = 0
1702 1703
1703 1704 # is the wfile new since m1, and match m2?
1704 1705 if f not in m1:
1705 1706 t1 = self.wread(f)
1706 1707 t2 = self.file(f).read(m2[f])
1707 1708 if cmp(t1, t2) == 0:
1708 1709 n = m2[f]
1709 1710 del t1, t2
1710 1711
1711 1712 # are files different?
1712 1713 if n != m2[f]:
1713 1714 a = ma.get(f, nullid)
1714 1715 # are both different from the ancestor?
1715 1716 if n != a and m2[f] != a:
1716 1717 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1717 1718 # merge executable bits
1718 1719 # "if we changed or they changed, change in merge"
1719 1720 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1720 1721 mode = ((a^b) | (a^c)) ^ a
1721 1722 merge[f] = (m1.get(f, nullid), m2[f], mode)
1722 1723 s = 1
1723 1724 # are we clobbering?
1724 1725 # is remote's version newer?
1725 1726 # or are we going back in time?
1726 1727 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1727 1728 self.ui.debug(_(" remote %s is newer, get\n") % f)
1728 1729 get[f] = m2[f]
1729 1730 s = 1
1730 1731 elif f in umap or f in added:
1731 1732 # this unknown file is the same as the checkout
1732 1733 # we need to reset the dirstate if the file was added
1733 1734 get[f] = m2[f]
1734 1735
1735 1736 if not s and mfw[f] != mf2[f]:
1736 1737 if force:
1737 1738 self.ui.debug(_(" updating permissions for %s\n") % f)
1738 1739 util.set_exec(self.wjoin(f), mf2[f])
1739 1740 else:
1740 1741 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1741 1742 mode = ((a^b) | (a^c)) ^ a
1742 1743 if mode != b:
1743 1744 self.ui.debug(_(" updating permissions for %s\n")
1744 1745 % f)
1745 1746 util.set_exec(self.wjoin(f), mode)
1746 1747 del m2[f]
1747 1748 elif f in ma:
1748 1749 if n != ma[f]:
1749 1750 r = _("d")
1750 1751 if not force and (linear_path or allow):
1751 1752 r = self.ui.prompt(
1752 1753 (_(" local changed %s which remote deleted\n") % f) +
1753 1754 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1754 1755 if r == _("d"):
1755 1756 remove.append(f)
1756 1757 else:
1757 1758 self.ui.debug(_("other deleted %s\n") % f)
1758 1759 remove.append(f) # other deleted it
1759 1760 else:
1760 1761 # file is created on branch or in working directory
1761 1762 if force and f not in umap:
1762 1763 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1763 1764 remove.append(f)
1764 1765 elif n == m1.get(f, nullid): # same as parent
1765 1766 if p2 == pa: # going backwards?
1766 1767 self.ui.debug(_("remote deleted %s\n") % f)
1767 1768 remove.append(f)
1768 1769 else:
1769 1770 self.ui.debug(_("local modified %s, keeping\n") % f)
1770 1771 else:
1771 1772 self.ui.debug(_("working dir created %s, keeping\n") % f)
1772 1773
1773 1774 for f, n in m2.iteritems():
1774 1775 if choose and not choose(f):
1775 1776 continue
1776 1777 if f[0] == "/":
1777 1778 continue
1778 1779 if f in ma and n != ma[f]:
1779 1780 r = _("k")
1780 1781 if not force and (linear_path or allow):
1781 1782 r = self.ui.prompt(
1782 1783 (_("remote changed %s which local deleted\n") % f) +
1783 1784 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1784 1785 if r == _("k"):
1785 1786 get[f] = n
1786 1787 elif f not in ma:
1787 1788 self.ui.debug(_("remote created %s\n") % f)
1788 1789 get[f] = n
1789 1790 else:
1790 1791 if force or p2 == pa: # going backwards?
1791 1792 self.ui.debug(_("local deleted %s, recreating\n") % f)
1792 1793 get[f] = n
1793 1794 else:
1794 1795 self.ui.debug(_("local deleted %s\n") % f)
1795 1796
1796 1797 del mw, m1, m2, ma
1797 1798
1798 1799 if force:
1799 1800 for f in merge:
1800 1801 get[f] = merge[f][1]
1801 1802 merge = {}
1802 1803
1803 1804 if linear_path or force:
1804 1805 # we don't need to do any magic, just jump to the new rev
1805 1806 branch_merge = False
1806 1807 p1, p2 = p2, nullid
1807 1808 else:
1808 1809 if not allow:
1809 1810 self.ui.status(_("this update spans a branch"
1810 1811 " affecting the following files:\n"))
1811 1812 fl = merge.keys() + get.keys()
1812 1813 fl.sort()
1813 1814 for f in fl:
1814 1815 cf = ""
1815 1816 if f in merge:
1816 1817 cf = _(" (resolve)")
1817 1818 self.ui.status(" %s%s\n" % (f, cf))
1818 1819 self.ui.warn(_("aborting update spanning branches!\n"))
1819 1820 self.ui.status(_("(use 'hg merge' to merge across branches"
1820 1821 " or 'hg update -C' to lose changes)\n"))
1821 1822 return 1
1822 1823 branch_merge = True
1823 1824
1824 1825 xp1 = hex(p1)
1825 1826 xp2 = hex(p2)
1826 1827 if p2 == nullid: xxp2 = ''
1827 1828 else: xxp2 = xp2
1828 1829
1829 1830 self.hook('preupdate', throw=True, parent1=xp1, parent2=xxp2)
1830 1831
1831 1832 # get the files we don't need to change
1832 1833 files = get.keys()
1833 1834 files.sort()
1834 1835 for f in files:
1835 1836 if f[0] == "/":
1836 1837 continue
1837 1838 self.ui.note(_("getting %s\n") % f)
1838 1839 t = self.file(f).read(get[f])
1839 1840 self.wwrite(f, t)
1840 1841 util.set_exec(self.wjoin(f), mf2[f])
1841 1842 if moddirstate:
1842 1843 if branch_merge:
1843 1844 self.dirstate.update([f], 'n', st_mtime=-1)
1844 1845 else:
1845 1846 self.dirstate.update([f], 'n')
1846 1847
1847 1848 # merge the tricky bits
1848 1849 failedmerge = []
1849 1850 files = merge.keys()
1850 1851 files.sort()
1851 1852 for f in files:
1852 1853 self.ui.status(_("merging %s\n") % f)
1853 1854 my, other, flag = merge[f]
1854 1855 ret = self.merge3(f, my, other, xp1, xp2)
1855 1856 if ret:
1856 1857 err = True
1857 1858 failedmerge.append(f)
1858 1859 util.set_exec(self.wjoin(f), flag)
1859 1860 if moddirstate:
1860 1861 if branch_merge:
1861 1862 # We've done a branch merge, mark this file as merged
1862 1863 # so that we properly record the merger later
1863 1864 self.dirstate.update([f], 'm')
1864 1865 else:
1865 1866 # We've update-merged a locally modified file, so
1866 1867 # we set the dirstate to emulate a normal checkout
1867 1868 # of that file some time in the past. Thus our
1868 1869 # merge will appear as a normal local file
1869 1870 # modification.
1870 1871 f_len = len(self.file(f).read(other))
1871 1872 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1872 1873
1873 1874 remove.sort()
1874 1875 for f in remove:
1875 1876 self.ui.note(_("removing %s\n") % f)
1876 1877 util.audit_path(f)
1877 1878 try:
1878 1879 util.unlink(self.wjoin(f))
1879 1880 except OSError, inst:
1880 1881 if inst.errno != errno.ENOENT:
1881 1882 self.ui.warn(_("update failed to remove %s: %s!\n") %
1882 1883 (f, inst.strerror))
1883 1884 if moddirstate:
1884 1885 if branch_merge:
1885 1886 self.dirstate.update(remove, 'r')
1886 1887 else:
1887 1888 self.dirstate.forget(remove)
1888 1889
1889 1890 if moddirstate:
1890 1891 self.dirstate.setparents(p1, p2)
1891 1892
1892 1893 if show_stats:
1893 1894 stats = ((len(get), _("updated")),
1894 1895 (len(merge) - len(failedmerge), _("merged")),
1895 1896 (len(remove), _("removed")),
1896 1897 (len(failedmerge), _("unresolved")))
1897 1898 note = ", ".join([_("%d files %s") % s for s in stats])
1898 1899 self.ui.status("%s\n" % note)
1899 1900 if moddirstate:
1900 1901 if branch_merge:
1901 1902 if failedmerge:
1902 1903 self.ui.status(_("There are unresolved merges,"
1903 1904 " you can redo the full merge using:\n"
1904 1905 " hg update -C %s\n"
1905 1906 " hg merge %s\n"
1906 1907 % (self.changelog.rev(p1),
1907 1908 self.changelog.rev(p2))))
1908 1909 else:
1909 1910 self.ui.status(_("(branch merge, don't forget to commit)\n"))
1910 1911 elif failedmerge:
1911 1912 self.ui.status(_("There are unresolved merges with"
1912 1913 " locally modified files.\n"))
1913 1914
1914 1915 self.hook('update', parent1=xp1, parent2=xxp2, error=int(err))
1915 1916 return err
1916 1917
1917 1918 def merge3(self, fn, my, other, p1, p2):
1918 1919 """perform a 3-way merge in the working directory"""
1919 1920
1920 1921 def temp(prefix, node):
1921 1922 pre = "%s~%s." % (os.path.basename(fn), prefix)
1922 1923 (fd, name) = tempfile.mkstemp(prefix=pre)
1923 1924 f = os.fdopen(fd, "wb")
1924 1925 self.wwrite(fn, fl.read(node), f)
1925 1926 f.close()
1926 1927 return name
1927 1928
1928 1929 fl = self.file(fn)
1929 1930 base = fl.ancestor(my, other)
1930 1931 a = self.wjoin(fn)
1931 1932 b = temp("base", base)
1932 1933 c = temp("other", other)
1933 1934
1934 1935 self.ui.note(_("resolving %s\n") % fn)
1935 1936 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1936 1937 (fn, short(my), short(other), short(base)))
1937 1938
1938 1939 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1939 1940 or "hgmerge")
1940 1941 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1941 1942 environ={'HG_FILE': fn,
1942 1943 'HG_MY_NODE': p1,
1943 1944 'HG_OTHER_NODE': p2,
1944 1945 'HG_FILE_MY_NODE': hex(my),
1945 1946 'HG_FILE_OTHER_NODE': hex(other),
1946 1947 'HG_FILE_BASE_NODE': hex(base)})
1947 1948 if r:
1948 1949 self.ui.warn(_("merging %s failed!\n") % fn)
1949 1950
1950 1951 os.unlink(b)
1951 1952 os.unlink(c)
1952 1953 return r
1953 1954
1954 1955 def verify(self):
1955 1956 filelinkrevs = {}
1956 1957 filenodes = {}
1957 1958 changesets = revisions = files = 0
1958 1959 errors = [0]
1959 1960 warnings = [0]
1960 1961 neededmanifests = {}
1961 1962
1962 1963 def err(msg):
1963 1964 self.ui.warn(msg + "\n")
1964 1965 errors[0] += 1
1965 1966
1966 1967 def warn(msg):
1967 1968 self.ui.warn(msg + "\n")
1968 1969 warnings[0] += 1
1969 1970
1970 1971 def checksize(obj, name):
1971 1972 d = obj.checksize()
1972 1973 if d[0]:
1973 1974 err(_("%s data length off by %d bytes") % (name, d[0]))
1974 1975 if d[1]:
1975 1976 err(_("%s index contains %d extra bytes") % (name, d[1]))
1976 1977
1977 1978 def checkversion(obj, name):
1978 1979 if obj.version != revlog.REVLOGV0:
1979 1980 if not revlogv1:
1980 1981 warn(_("warning: `%s' uses revlog format 1") % name)
1981 1982 elif revlogv1:
1982 1983 warn(_("warning: `%s' uses revlog format 0") % name)
1983 1984
1984 1985 revlogv1 = self.revlogversion != revlog.REVLOGV0
1985 1986 if self.ui.verbose or revlogv1 != self.revlogv1:
1986 1987 self.ui.status(_("repository uses revlog format %d\n") %
1987 1988 (revlogv1 and 1 or 0))
1988 1989
1989 1990 seen = {}
1990 1991 self.ui.status(_("checking changesets\n"))
1991 1992 checksize(self.changelog, "changelog")
1992 1993
1993 1994 for i in range(self.changelog.count()):
1994 1995 changesets += 1
1995 1996 n = self.changelog.node(i)
1996 1997 l = self.changelog.linkrev(n)
1997 1998 if l != i:
1998 1999 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1999 2000 if n in seen:
2000 2001 err(_("duplicate changeset at revision %d") % i)
2001 2002 seen[n] = 1
2002 2003
2003 2004 for p in self.changelog.parents(n):
2004 2005 if p not in self.changelog.nodemap:
2005 2006 err(_("changeset %s has unknown parent %s") %
2006 2007 (short(n), short(p)))
2007 2008 try:
2008 2009 changes = self.changelog.read(n)
2009 2010 except KeyboardInterrupt:
2010 2011 self.ui.warn(_("interrupted"))
2011 2012 raise
2012 2013 except Exception, inst:
2013 2014 err(_("unpacking changeset %s: %s") % (short(n), inst))
2014 2015 continue
2015 2016
2016 2017 neededmanifests[changes[0]] = n
2017 2018
2018 2019 for f in changes[3]:
2019 2020 filelinkrevs.setdefault(f, []).append(i)
2020 2021
2021 2022 seen = {}
2022 2023 self.ui.status(_("checking manifests\n"))
2023 2024 checkversion(self.manifest, "manifest")
2024 2025 checksize(self.manifest, "manifest")
2025 2026
2026 2027 for i in range(self.manifest.count()):
2027 2028 n = self.manifest.node(i)
2028 2029 l = self.manifest.linkrev(n)
2029 2030
2030 2031 if l < 0 or l >= self.changelog.count():
2031 2032 err(_("bad manifest link (%d) at revision %d") % (l, i))
2032 2033
2033 2034 if n in neededmanifests:
2034 2035 del neededmanifests[n]
2035 2036
2036 2037 if n in seen:
2037 2038 err(_("duplicate manifest at revision %d") % i)
2038 2039
2039 2040 seen[n] = 1
2040 2041
2041 2042 for p in self.manifest.parents(n):
2042 2043 if p not in self.manifest.nodemap:
2043 2044 err(_("manifest %s has unknown parent %s") %
2044 2045 (short(n), short(p)))
2045 2046
2046 2047 try:
2047 2048 delta = mdiff.patchtext(self.manifest.delta(n))
2048 2049 except KeyboardInterrupt:
2049 2050 self.ui.warn(_("interrupted"))
2050 2051 raise
2051 2052 except Exception, inst:
2052 2053 err(_("unpacking manifest %s: %s") % (short(n), inst))
2053 2054 continue
2054 2055
2055 2056 try:
2056 2057 ff = [ l.split('\0') for l in delta.splitlines() ]
2057 2058 for f, fn in ff:
2058 2059 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
2059 2060 except (ValueError, TypeError), inst:
2060 2061 err(_("broken delta in manifest %s: %s") % (short(n), inst))
2061 2062
2062 2063 self.ui.status(_("crosschecking files in changesets and manifests\n"))
2063 2064
2064 2065 for m, c in neededmanifests.items():
2065 2066 err(_("Changeset %s refers to unknown manifest %s") %
2066 2067 (short(m), short(c)))
2067 2068 del neededmanifests
2068 2069
2069 2070 for f in filenodes:
2070 2071 if f not in filelinkrevs:
2071 2072 err(_("file %s in manifest but not in changesets") % f)
2072 2073
2073 2074 for f in filelinkrevs:
2074 2075 if f not in filenodes:
2075 2076 err(_("file %s in changeset but not in manifest") % f)
2076 2077
2077 2078 self.ui.status(_("checking files\n"))
2078 2079 ff = filenodes.keys()
2079 2080 ff.sort()
2080 2081 for f in ff:
2081 2082 if f == "/dev/null":
2082 2083 continue
2083 2084 files += 1
2084 2085 if not f:
2085 2086 err(_("file without name in manifest %s") % short(n))
2086 2087 continue
2087 2088 fl = self.file(f)
2088 2089 checkversion(fl, f)
2089 2090 checksize(fl, f)
2090 2091
2091 2092 nodes = {nullid: 1}
2092 2093 seen = {}
2093 2094 for i in range(fl.count()):
2094 2095 revisions += 1
2095 2096 n = fl.node(i)
2096 2097
2097 2098 if n in seen:
2098 2099 err(_("%s: duplicate revision %d") % (f, i))
2099 2100 if n not in filenodes[f]:
2100 2101 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
2101 2102 else:
2102 2103 del filenodes[f][n]
2103 2104
2104 2105 flr = fl.linkrev(n)
2105 2106 if flr not in filelinkrevs.get(f, []):
2106 2107 err(_("%s:%s points to unexpected changeset %d")
2107 2108 % (f, short(n), flr))
2108 2109 else:
2109 2110 filelinkrevs[f].remove(flr)
2110 2111
2111 2112 # verify contents
2112 2113 try:
2113 2114 t = fl.read(n)
2114 2115 except KeyboardInterrupt:
2115 2116 self.ui.warn(_("interrupted"))
2116 2117 raise
2117 2118 except Exception, inst:
2118 2119 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
2119 2120
2120 2121 # verify parents
2121 2122 (p1, p2) = fl.parents(n)
2122 2123 if p1 not in nodes:
2123 2124 err(_("file %s:%s unknown parent 1 %s") %
2124 2125 (f, short(n), short(p1)))
2125 2126 if p2 not in nodes:
2126 2127 err(_("file %s:%s unknown parent 2 %s") %
2127 2128 (f, short(n), short(p1)))
2128 2129 nodes[n] = 1
2129 2130
2130 2131 # cross-check
2131 2132 for node in filenodes[f]:
2132 2133 err(_("node %s in manifests not in %s") % (hex(node), f))
2133 2134
2134 2135 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
2135 2136 (files, changesets, revisions))
2136 2137
2137 2138 if warnings[0]:
2138 2139 self.ui.warn(_("%d warnings encountered!\n") % warnings[0])
2139 2140 if errors[0]:
2140 2141 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
2141 2142 return 1
2142 2143
# used to avoid circular references so destructors work
def aftertrans(base):
    """return a callback that promotes the transaction journal to undo files

    The repository path is captured in a plain local (not self) so the
    returned closure holds no reference back to the repository object.
    """
    repodir = base
    def renamefiles():
        # move each journal file to its corresponding undo file
        for old, new in (("journal", "undo"),
                         ("journal.dirstate", "undo.dirstate")):
            util.rename(os.path.join(repodir, old),
                        os.path.join(repodir, new))
    return renamefiles
2151 2152
General Comments 0
You need to be logged in to leave comments. Login now