##// END OF EJS Templates
remove appendfile for the manifest when adding a changegroup...
Benoit Boissinot -
r2395:8ed45fb1 default
parent child Browse files
Show More
@@ -1,2122 +1,2109
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import os, util
9 9 import filelog, manifest, changelog, dirstate, repo
10 10 from node import *
11 11 from i18n import gettext as _
12 12 from demandload import *
13 13 demandload(globals(), "appendfile changegroup")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "revlog")
16 16
17 17 class localrepository(object):
    def __del__(self):
        # Drop the transaction handle on teardown so a pending
        # transaction object is not kept alive by this repository.
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, initialize) the repository at path.

        When path is None, search upward from the current directory for
        a ".hg" directory.  Raises repo.RepoError when no repository is
        found or when path does not contain one and create is not set.
        """
        if not path:
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    # reached the filesystem root without finding .hg
                    raise repo.RepoError(_("no repo found"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise repo.RepoError(_("repository %s not found") % path)

        self.root = os.path.abspath(path)
        self.origroot = path
        self.ui = ui.ui(parentui=parentui)
        # opener reads/writes under .hg; wopener under the working dir
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        # a missing .hg/hgrc is not an error
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            pass

        # revlog format and flags requested via the [revlog] config section
        v = self.ui.revlogopts
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.opener, v)
        self.changelog = changelog.changelog(self.opener, v)

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily-populated caches; see tags()/nodetags()/wread()/wwrite()
        self.tagscache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
80 80
    def hook(self, name, throw=False, **args):
        """Run all configured hooks whose name matches 'name'.

        Python hooks ("python:mod.func") are called in-process; any
        other entry is run through the shell with HG_* environment
        variables built from **args.  Returns the OR of all hook
        results; with throw=True a failing hook raises util.Abort.
        """
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                raise util.Abort(_('%s hook is invalid '
                                   '(import of "%s" failed)') %
                                 (hname, modname))
            try:
                # walk the dotted path down to the callable itself
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                # user interrupts always propagate
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0] % exc.args[1:]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # external hook: run through the shell with HG_* environment
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        # sort so "name" and "name.suffix" entries run in a stable order
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
157 157
158 158 def tags(self):
159 159 '''return a mapping of tag to node'''
160 160 if not self.tagscache:
161 161 self.tagscache = {}
162 162
163 163 def parsetag(line, context):
164 164 if not line:
165 165 return
166 166 s = l.split(" ", 1)
167 167 if len(s) != 2:
168 168 self.ui.warn(_("%s: cannot parse entry\n") % context)
169 169 return
170 170 node, key = s
171 171 key = key.strip()
172 172 try:
173 173 bin_n = bin(node)
174 174 except TypeError:
175 175 self.ui.warn(_("%s: node '%s' is not well formed\n") %
176 176 (context, node))
177 177 return
178 178 if bin_n not in self.changelog.nodemap:
179 179 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
180 180 (context, key))
181 181 return
182 182 self.tagscache[key] = bin_n
183 183
184 184 # read the tags file from each head, ending with the tip,
185 185 # and add each tag found to the map, with "newer" ones
186 186 # taking precedence
187 187 heads = self.heads()
188 188 heads.reverse()
189 189 fl = self.file(".hgtags")
190 190 for node in heads:
191 191 change = self.changelog.read(node)
192 192 rev = self.changelog.rev(node)
193 193 fn, ff = self.manifest.find(change[0], '.hgtags')
194 194 if fn is None: continue
195 195 count = 0
196 196 for l in fl.read(fn).splitlines():
197 197 count += 1
198 198 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
199 199 (rev, short(node), count))
200 200 try:
201 201 f = self.opener("localtags")
202 202 count = 0
203 203 for l in f:
204 204 count += 1
205 205 parsetag(l, _("localtags, line %d") % count)
206 206 except IOError:
207 207 pass
208 208
209 209 self.tagscache['tip'] = self.changelog.tip()
210 210
211 211 return self.tagscache
212 212
213 213 def tagslist(self):
214 214 '''return a list of tags ordered by revision'''
215 215 l = []
216 216 for t, n in self.tags().items():
217 217 try:
218 218 r = self.changelog.rev(n)
219 219 except:
220 220 r = -2 # sort to the beginning of the list if unknown
221 221 l.append((r, t, n))
222 222 l.sort()
223 223 return [(t, n) for r, t, n in l]
224 224
225 225 def nodetags(self, node):
226 226 '''return the tags associated with a node'''
227 227 if not self.nodetagscache:
228 228 self.nodetagscache = {}
229 229 for t, n in self.tags().items():
230 230 self.nodetagscache.setdefault(n, []).append(t)
231 231 return self.nodetagscache.get(node, [])
232 232
233 233 def lookup(self, key):
234 234 try:
235 235 return self.tags()[key]
236 236 except KeyError:
237 237 try:
238 238 return self.changelog.lookup(key)
239 239 except:
240 240 raise repo.RepoError(_("unknown revision '%s'") % key)
241 241
    def dev(self):
        # device number of the .hg directory; lets callers check whether
        # two repositories live on the same filesystem
        return os.stat(self.path).st_dev
244 244
    def local(self):
        # this is a local (filesystem) repository, not a remote proxy
        return True
247 247
    def join(self, f):
        # path of f inside the .hg metadata directory
        return os.path.join(self.path, f)
250 250
    def wjoin(self, f):
        # path of f inside the working directory
        return os.path.join(self.root, f)
253 253
254 254 def file(self, f):
255 255 if f[0] == '/':
256 256 f = f[1:]
257 257 return filelog.filelog(self.opener, f, self.revlogversion)
258 258
    def getcwd(self):
        # current working directory, relative to the repository root
        return self.dirstate.getcwd()
261 261
    def wfile(self, f, mode='r'):
        # open a working-directory file (path relative to the root)
        return self.wopener(f, mode)
264 264
265 265 def wread(self, filename):
266 266 if self.encodepats == None:
267 267 l = []
268 268 for pat, cmd in self.ui.configitems("encode"):
269 269 mf = util.matcher(self.root, "", [pat], [], [])[1]
270 270 l.append((mf, cmd))
271 271 self.encodepats = l
272 272
273 273 data = self.wopener(filename, 'r').read()
274 274
275 275 for mf, cmd in self.encodepats:
276 276 if mf(filename):
277 277 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
278 278 data = util.filter(data, cmd)
279 279 break
280 280
281 281 return data
282 282
283 283 def wwrite(self, filename, data, fd=None):
284 284 if self.decodepats == None:
285 285 l = []
286 286 for pat, cmd in self.ui.configitems("decode"):
287 287 mf = util.matcher(self.root, "", [pat], [], [])[1]
288 288 l.append((mf, cmd))
289 289 self.decodepats = l
290 290
291 291 for mf, cmd in self.decodepats:
292 292 if mf(filename):
293 293 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
294 294 data = util.filter(data, cmd)
295 295 break
296 296
297 297 if fd:
298 298 return fd.write(data)
299 299 return self.wopener(filename, 'w').write(data)
300 300
    def transaction(self):
        """Return a transaction on this repository.

        If a transaction is already running, return a nested handle to
        it; otherwise snapshot the dirstate for rollback and open a new
        journalled transaction.
        """
        tr = self.transhandle
        if tr != None and tr.running():
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (e.g. fresh repository)
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        tr = transaction.transaction(self.ui.warn, self.opener,
                                     self.join("journal"),
                                     aftertrans(self.path))
        self.transhandle = tr
        return tr
318 318
319 319 def recover(self):
320 320 l = self.lock()
321 321 if os.path.exists(self.join("journal")):
322 322 self.ui.status(_("rolling back interrupted transaction\n"))
323 323 transaction.rollback(self.opener, self.join("journal"))
324 324 self.reload()
325 325 return True
326 326 else:
327 327 self.ui.warn(_("no interrupted transaction available\n"))
328 328 return False
329 329
330 330 def rollback(self, wlock=None):
331 331 if not wlock:
332 332 wlock = self.wlock()
333 333 l = self.lock()
334 334 if os.path.exists(self.join("undo")):
335 335 self.ui.status(_("rolling back last transaction\n"))
336 336 transaction.rollback(self.opener, self.join("undo"))
337 337 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
338 338 self.reload()
339 339 self.wreload()
340 340 else:
341 341 self.ui.warn(_("no rollback information available\n"))
342 342
    def wreload(self):
        # re-read the dirstate (working-directory state) from disk
        self.dirstate.read()
345 345
    def reload(self):
        # re-read store metadata from disk and drop tag caches that
        # were derived from it
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None
351 351
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the lock file 'lockname' inside .hg.

        If the lock is held and wait is false, lock.LockHeld propagates;
        otherwise retry with a timeout (ui.timeout config, default 600
        seconds).  releasefn runs when the lock is released; acquirefn
        runs immediately after acquisition.  Returns the lock object.
        """
        try:
            # first try without a timeout
            l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %s\n") %
                         (desc, inst.args[0]))
            # default to 600 seconds timeout
            l = lock.lock(self.join(lockname),
                          int(self.ui.config("ui", "timeout") or 600),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
368 368
    def lock(self, wait=1):
        # repository-wide store lock; reloads metadata on acquisition
        return self.do_lock("lock", wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
372 372
    def wlock(self, wait=1):
        # working-directory lock; writes the dirstate on release and
        # re-reads it on acquisition
        return self.do_lock("wlock", wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
377 377
    def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
        """Determine whether a new filenode is needed.

        Returns (entry, None, None) when the content equals the
        surviving parent (the caller reuses 'entry'), otherwise
        (None, fp1, fp2) naming the parents for a new filelog entry.
        """
        fp1 = manifest1.get(filename, nullid)
        fp2 = manifest2.get(filename, nullid)

        if fp2 != nullid:
            # is one parent an ancestor of the other? then the change is
            # really only against the descendant parent
            fpa = filelog.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and text == filelog.read(fp1):
            return (fp1, None, None)

        return (None, fp1, fp2)
396 396
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        """Commit the given files directly, bypassing the normal
        working-directory status checks (used by import-like commands).

        p1/p2 default to the dirstate parents.  The dirstate is only
        updated when p1 is the current first parent.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        # only touch the dirstate when committing on top of its parent
        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # unchanged from a parent: reuse the existing entry
                    mm[f] = entry
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # missing from the working dir: treat as a removal
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
452 452
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False):
        """Commit changes to the repository.

        With 'files', commit exactly those; otherwise commit whatever
        self.changes(match) reports.  An empty or blank commit message
        aborts the commit.  Returns the new changeset node, or None when
        nothing was committed.
        """
        commit = []
        remove = []
        changed = []

        if files:
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            modified, added, removed, deleted, unknown = self.changes(match=match)
            commit = modified + added
            remove = removed

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        # a merge (p2 set) may legitimately commit no file changes
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status(_("nothing changed\n"))
            return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wread(f)
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

            r = self.file(f)

            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                # record copy source and its revision in the file metadata
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
                fp1, fp2 = nullid, nullid
            else:
                entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    new[f] = entry
                    continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1 = m1.copy()
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            # build a template message and let the user edit it
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # strip trailing whitespace and leading blank lines; an entirely
        # empty message aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
579 579
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        """Generate (src, filename) pairs for matching files.

        With 'node', walk that revision's manifest (src is 'm'); files
        requested but not present are passed to badmatch (src 'b') or
        warned about.  Without 'node', delegate to dirstate.walk.
        """
        if node:
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                # drop found files so only the missing ones remain below
                fdict.pop(fn, None)
                if match(fn):
                    yield 'm', fn
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
597 597
    def changes(self, node1=None, node2=None, files=[], match=util.always,
                wlock=None, show_ignored=None):
        """return changes between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns (modified, added, removed, deleted, unknown) and, when
        show_ignored is not None, an additional 'ignored' list.
        """

        def fcmp(fn, mf):
            # compare working-dir contents against the manifest revision
            t1 = self.wread(fn)
            t2 = self.file(fn).read(mf.get(fn, nullid))
            return cmp(t1, t2)

        def mfmatches(node):
            # manifest of 'node' restricted to files accepted by 'match'
            change = self.changelog.read(node)
            mf = dict(self.manifest.read(change[0]))
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if node1:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                # a wlock lets us record fixed-up entries in the
                # dirstate below; proceed read-only if unavailable
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            lookup, modified, added, removed, deleted, unknown, ignored = (
                self.dirstate.changes(files, match, show_ignored))

            # are we comparing working dir against its parent?
            if not node1:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        elif wlock is not None:
                            # contents unchanged: mark clean in dirstate
                            self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            deleted, unknown, ignored = [], [], []
            mf2 = mfmatches(node2)

        if node1:
            # flush lists from dirstate before comparing manifests
            modified, added = [], []

            for fn in mf2:
                if mf1.has_key(fn):
                    if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
                        modified.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever is left in mf1 exists only in node1: removed
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored:
            l.sort()
        if show_ignored is None:
            return (modified, added, removed, deleted, unknown)
        else:
            return (modified, added, removed, deleted, unknown, ignored)
680 680
681 681 def add(self, list, wlock=None):
682 682 if not wlock:
683 683 wlock = self.wlock()
684 684 for f in list:
685 685 p = self.wjoin(f)
686 686 if not os.path.exists(p):
687 687 self.ui.warn(_("%s does not exist!\n") % f)
688 688 elif not os.path.isfile(p):
689 689 self.ui.warn(_("%s not added: only files supported currently\n")
690 690 % f)
691 691 elif self.dirstate.state(f) in 'an':
692 692 self.ui.warn(_("%s already tracked!\n") % f)
693 693 else:
694 694 self.dirstate.update([f], "a")
695 695
696 696 def forget(self, list, wlock=None):
697 697 if not wlock:
698 698 wlock = self.wlock()
699 699 for f in list:
700 700 if self.dirstate.state(f) not in 'ai':
701 701 self.ui.warn(_("%s not added!\n") % f)
702 702 else:
703 703 self.dirstate.forget([f])
704 704
    def remove(self, list, unlink=False, wlock=None):
        """Schedule files for removal at the next commit; with
        unlink=True also delete them from the working directory."""
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # already gone is fine; anything else is a real error
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                # never committed: just drop the pending add
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")
725 725
726 726 def undelete(self, list, wlock=None):
727 727 p = self.dirstate.parents()[0]
728 728 mn = self.changelog.read(p)[0]
729 729 mf = self.manifest.readflags(mn)
730 730 m = self.manifest.read(mn)
731 731 if not wlock:
732 732 wlock = self.wlock()
733 733 for f in list:
734 734 if self.dirstate.state(f) not in "r":
735 735 self.ui.warn("%s not removed!\n" % f)
736 736 else:
737 737 t = self.file(f).read(m[f])
738 738 self.wwrite(f, t)
739 739 util.set_exec(self.wjoin(f), mf[f])
740 740 self.dirstate.update([f], "n")
741 741
742 742 def copy(self, source, dest, wlock=None):
743 743 p = self.wjoin(dest)
744 744 if not os.path.exists(p):
745 745 self.ui.warn(_("%s does not exist!\n") % dest)
746 746 elif not os.path.isfile(p):
747 747 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
748 748 else:
749 749 if not wlock:
750 750 wlock = self.wlock()
751 751 if self.dirstate.state(dest) == '?':
752 752 self.dirstate.update([dest], "a")
753 753 self.dirstate.copy(source, dest)
754 754
755 755 def heads(self, start=None):
756 756 heads = self.changelog.heads(start)
757 757 # sort the output in rev descending order
758 758 heads = [(-self.changelog.rev(h), h) for h in heads]
759 759 heads.sort()
760 760 return [n for (r, n) in heads]
761 761
762 762 # branchlookup returns a dict giving a list of branches for
763 763 # each head. A branch is defined as the tag of a node or
764 764 # the branch of the node's parents. If a node has multiple
765 765 # branch tags, tags are eliminated if they are visible from other
766 766 # branch tags.
767 767 #
768 768 # So, for this graph: a->b->c->d->e
769 769 # \ /
770 770 # aa -----/
771 771 # a has tag 2.6.12
772 772 # d has tag 2.6.13
773 773 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
774 774 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
775 775 # from the list.
776 776 #
777 777 # It is possible that more than one head will have the same branch tag.
778 778 # callers need to check the result for multiple heads under the same
779 779 # branch tag if that is a problem for them (ie checkout of a specific
780 780 # branch).
781 781 #
782 782 # passing in a specific branch will limit the depth of the search
783 783 # through the parents. It won't limit the branches returned in the
784 784 # result though.
    def branchlookup(self, heads=None, branch=None):
        """Return a dict mapping each head to its list of branch tags.

        See the block comment above this method for the elimination
        rules; 'branch' only limits the depth of the parent search.
        """
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume a deferred second-parent walk, keeping the
                # tags already found on the way here
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    # record this tagged node as visible from everything
                    # found so far (and from itself)
                    for x in tags:
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    # defer the merge's second parent for a later pass
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # transitive closure of tags reachable from 'node',
                # memoized in viscache
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
867 867
868 868 def branches(self, nodes):
869 869 if not nodes:
870 870 nodes = [self.changelog.tip()]
871 871 b = []
872 872 for n in nodes:
873 873 t = n
874 874 while 1:
875 875 p = self.changelog.parents(n)
876 876 if p[1] != nullid or p[0] == nullid:
877 877 b.append((t, n, p[0], p[1]))
878 878 break
879 879 n = p[0]
880 880 return b
881 881
882 882 def between(self, pairs):
883 883 r = []
884 884
885 885 for top, bottom in pairs:
886 886 n, l, i = top, [], 0
887 887 f = 1
888 888
889 889 while n != bottom:
890 890 p = self.changelog.parents(n)[0]
891 891 if i == f:
892 892 l.append(n)
893 893 f = f * 2
894 894 n = p
895 895 i += 1
896 896
897 897 r.append(l)
898 898
899 899 return r
900 900
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            # local repository is empty: everything remote has is missing
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    # queue unknown parents for the next round of
                    # remote.branches requests
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                # batch parent lookups ten at a time
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in range(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        # interval is small enough: p is a boundary
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            # only the null revision is shared: histories are unrelated
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.note(_("found new changesets starting at ") +
                     " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1041 1041
1042 1042 def findoutgoing(self, remote, base=None, heads=None, force=False):
1043 1043 """Return list of nodes that are roots of subsets not in remote
1044 1044
1045 1045 If base dict is specified, assume that these nodes and their parents
1046 1046 exist on the remote side.
1047 1047 If a list of heads is specified, return only nodes which are heads
1048 1048 or ancestors of these heads, and return a second element which
1049 1049 contains all remote heads which get new children.
1050 1050 """
1051 1051 if base == None:
1052 1052 base = {}
1053 1053 self.findincoming(remote, base, heads, force=force)
1054 1054
1055 1055 self.ui.debug(_("common changesets up to ")
1056 1056 + " ".join(map(short, base.keys())) + "\n")
1057 1057
1058 1058 remain = dict.fromkeys(self.changelog.nodemap)
1059 1059
1060 1060 # prune everything remote has from the tree
1061 1061 del remain[nullid]
1062 1062 remove = base.keys()
1063 1063 while remove:
1064 1064 n = remove.pop(0)
1065 1065 if n in remain:
1066 1066 del remain[n]
1067 1067 for p in self.changelog.parents(n):
1068 1068 remove.append(p)
1069 1069
1070 1070 # find every node whose parents have been pruned
1071 1071 subset = []
1072 1072 # find every remote head that will get new children
1073 1073 updated_heads = {}
1074 1074 for n in remain:
1075 1075 p1, p2 = self.changelog.parents(n)
1076 1076 if p1 not in remain and p2 not in remain:
1077 1077 subset.append(n)
1078 1078 if heads:
1079 1079 if p1 in heads:
1080 1080 updated_heads[p1] = True
1081 1081 if p2 in heads:
1082 1082 updated_heads[p2] = True
1083 1083
1084 1084 # this is the set of all roots we have to push
1085 1085 if heads:
1086 1086 return subset, updated_heads.keys()
1087 1087 else:
1088 1088 return subset
1089 1089
1090 1090 def pull(self, remote, heads=None, force=False):
1091 1091 l = self.lock()
1092 1092
1093 1093 fetch = self.findincoming(remote, force=force)
1094 1094 if fetch == [nullid]:
1095 1095 self.ui.status(_("requesting all changes\n"))
1096 1096
1097 1097 if not fetch:
1098 1098 self.ui.status(_("no changes found\n"))
1099 1099 return 0
1100 1100
1101 1101 if heads is None:
1102 1102 cg = remote.changegroup(fetch, 'pull')
1103 1103 else:
1104 1104 cg = remote.changegroupsubset(fetch, heads, 'pull')
1105 1105 return self.addchangegroup(cg, 'pull')
1106 1106
1107 1107 def push(self, remote, force=False, revs=None):
1108 1108 lock = remote.lock()
1109 1109
1110 1110 base = {}
1111 1111 remote_heads = remote.heads()
1112 1112 inc = self.findincoming(remote, base, remote_heads, force=force)
1113 1113 if not force and inc:
1114 1114 self.ui.warn(_("abort: unsynced remote changes!\n"))
1115 1115 self.ui.status(_("(did you forget to sync?"
1116 1116 " use push -f to force)\n"))
1117 1117 return 1
1118 1118
1119 1119 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1120 1120 if revs is not None:
1121 1121 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1122 1122 else:
1123 1123 bases, heads = update, self.changelog.heads()
1124 1124
1125 1125 if not bases:
1126 1126 self.ui.status(_("no changes found\n"))
1127 1127 return 1
1128 1128 elif not force:
1129 1129 # FIXME we don't properly detect creation of new heads
1130 1130 # in the push -r case, assume the user knows what he's doing
1131 1131 if not revs and len(remote_heads) < len(heads) \
1132 1132 and remote_heads != [nullid]:
1133 1133 self.ui.warn(_("abort: push creates new remote branches!\n"))
1134 1134 self.ui.status(_("(did you forget to merge?"
1135 1135 " use push -f to force)\n"))
1136 1136 return 1
1137 1137
1138 1138 if revs is None:
1139 1139 cg = self.changegroup(update, 'push')
1140 1140 else:
1141 1141 cg = self.changegroupsubset(update, revs, 'push')
1142 1142 return remote.addchangegroup(cg, 'push')
1143 1143
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        bases and heads are lists of changelog nodes; source is passed
        through to the preoutgoing/outgoing hooks.  Returns a
        util.chunkbuffer that streams the group lazily."""

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them (and all their ancestors) from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            # mutable cell holding the revision we expect to see next, so the
            # closure can tell consecutive revisions apart from jumps
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                    else:
                        # Otherwise we need a full manifest.
                        m = mnfst.read(mnfstnode)
                        # For every file we care about.
                        for f in changedfiles:
                            fnode = m.get(f, None)
                            # If it's in the manifest
                            if fnode is not None:
                                # See comments above.
                                clnode = msng_mnfst_set[mnfstnode]
                                ndset = msng_filenode_set.setdefault(f, {})
                                ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, let's remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1414 1414
1415 1415 def changegroup(self, basenodes, source):
1416 1416 """Generate a changegroup of all nodes that we have that a recipient
1417 1417 doesn't.
1418 1418
1419 1419 This is much easier than the previous function as we can assume that
1420 1420 the recipient has any changenode we aren't sending them."""
1421 1421
1422 1422 self.hook('preoutgoing', throw=True, source=source)
1423 1423
1424 1424 cl = self.changelog
1425 1425 nodes = cl.nodesbetween(basenodes, None)[0]
1426 1426 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1427 1427
1428 1428 def identity(x):
1429 1429 return x
1430 1430
1431 1431 def gennodelst(revlog):
1432 1432 for r in xrange(0, revlog.count()):
1433 1433 n = revlog.node(r)
1434 1434 if revlog.linkrev(n) in revset:
1435 1435 yield n
1436 1436
1437 1437 def changed_file_collector(changedfileset):
1438 1438 def collect_changed_files(clnode):
1439 1439 c = cl.read(clnode)
1440 1440 for fname in c[3]:
1441 1441 changedfileset[fname] = 1
1442 1442 return collect_changed_files
1443 1443
1444 1444 def lookuprevlink_func(revlog):
1445 1445 def lookuprevlink(n):
1446 1446 return cl.node(revlog.linkrev(n))
1447 1447 return lookuprevlink
1448 1448
1449 1449 def gengroup():
1450 1450 # construct a list of all changed files
1451 1451 changedfiles = {}
1452 1452
1453 1453 for chnk in cl.group(nodes, identity,
1454 1454 changed_file_collector(changedfiles)):
1455 1455 yield chnk
1456 1456 changedfiles = changedfiles.keys()
1457 1457 changedfiles.sort()
1458 1458
1459 1459 mnfst = self.manifest
1460 1460 nodeiter = gennodelst(mnfst)
1461 1461 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1462 1462 yield chnk
1463 1463
1464 1464 for fname in changedfiles:
1465 1465 filerevlog = self.file(fname)
1466 1466 nodeiter = gennodelst(filerevlog)
1467 1467 nodeiter = list(nodeiter)
1468 1468 if nodeiter:
1469 1469 yield changegroup.genchunk(fname)
1470 1470 lookup = lookuprevlink_func(filerevlog)
1471 1471 for chnk in filerevlog.group(nodeiter, lookup):
1472 1472 yield chnk
1473 1473
1474 1474 yield changegroup.closechunk()
1475 1475
1476 1476 if nodes:
1477 1477 self.hook('outgoing', node=hex(nodes[0]), source=source)
1478 1478
1479 1479 return util.chunkbuffer(gengroup())
1480 1480
    def addchangegroup(self, source, srctype):
        """add changegroup to repo.
        returns number of heads modified or added + 1."""

        # NOTE: 'cl' referenced by both closures below is the appendfile
        # changelog bound later in the enclosing try block.
        def csmap(x):
            # log each incoming changeset; the changelog's current count is
            # returned as its link revision
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        # map an already-added changelog node to its revision number
        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.opener, self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1  # revision of the old tip
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1  # revision of the new tip
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            cl.writedata()
        finally:
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.opener, self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads > oldheads:
            heads = _(" (+%d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # pretxnchangegroup may still abort the transaction (throw=True)
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype)

            # one 'incoming' hook invocation per added changeset
            for i in range(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype)

        return newheads - oldheads + 1
1588 1575
1589 1576 def update(self, node, allow=False, force=False, choose=None,
1590 1577 moddirstate=True, forcemerge=False, wlock=None, show_stats=True):
1591 1578 pl = self.dirstate.parents()
1592 1579 if not force and pl[1] != nullid:
1593 1580 raise util.Abort(_("outstanding uncommitted merges"))
1594 1581
1595 1582 err = False
1596 1583
1597 1584 p1, p2 = pl[0], node
1598 1585 pa = self.changelog.ancestor(p1, p2)
1599 1586 m1n = self.changelog.read(p1)[0]
1600 1587 m2n = self.changelog.read(p2)[0]
1601 1588 man = self.manifest.ancestor(m1n, m2n)
1602 1589 m1 = self.manifest.read(m1n)
1603 1590 mf1 = self.manifest.readflags(m1n)
1604 1591 m2 = self.manifest.read(m2n).copy()
1605 1592 mf2 = self.manifest.readflags(m2n)
1606 1593 ma = self.manifest.read(man)
1607 1594 mfa = self.manifest.readflags(man)
1608 1595
1609 1596 modified, added, removed, deleted, unknown = self.changes()
1610 1597
1611 1598 # is this a jump, or a merge? i.e. is there a linear path
1612 1599 # from p1 to p2?
1613 1600 linear_path = (pa == p1 or pa == p2)
1614 1601
1615 1602 if allow and linear_path:
1616 1603 raise util.Abort(_("there is nothing to merge, "
1617 1604 "just use 'hg update'"))
1618 1605 if allow and not forcemerge:
1619 1606 if modified or added or removed:
1620 1607 raise util.Abort(_("outstanding uncommitted changes"))
1621 1608
1622 1609 if not forcemerge and not force:
1623 1610 for f in unknown:
1624 1611 if f in m2:
1625 1612 t1 = self.wread(f)
1626 1613 t2 = self.file(f).read(m2[f])
1627 1614 if cmp(t1, t2) != 0:
1628 1615 raise util.Abort(_("'%s' already exists in the working"
1629 1616 " dir and differs from remote") % f)
1630 1617
1631 1618 # resolve the manifest to determine which files
1632 1619 # we care about merging
1633 1620 self.ui.note(_("resolving manifests\n"))
1634 1621 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1635 1622 (force, allow, moddirstate, linear_path))
1636 1623 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1637 1624 (short(man), short(m1n), short(m2n)))
1638 1625
1639 1626 merge = {}
1640 1627 get = {}
1641 1628 remove = []
1642 1629
1643 1630 # construct a working dir manifest
1644 1631 mw = m1.copy()
1645 1632 mfw = mf1.copy()
1646 1633 umap = dict.fromkeys(unknown)
1647 1634
1648 1635 for f in added + modified + unknown:
1649 1636 mw[f] = ""
1650 1637 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1651 1638
1652 1639 if moddirstate and not wlock:
1653 1640 wlock = self.wlock()
1654 1641
1655 1642 for f in deleted + removed:
1656 1643 if f in mw:
1657 1644 del mw[f]
1658 1645
1659 1646 # If we're jumping between revisions (as opposed to merging),
1660 1647 # and if neither the working directory nor the target rev has
1661 1648 # the file, then we need to remove it from the dirstate, to
1662 1649 # prevent the dirstate from listing the file when it is no
1663 1650 # longer in the manifest.
1664 1651 if moddirstate and linear_path and f not in m2:
1665 1652 self.dirstate.forget((f,))
1666 1653
1667 1654 # Compare manifests
1668 1655 for f, n in mw.iteritems():
1669 1656 if choose and not choose(f):
1670 1657 continue
1671 1658 if f in m2:
1672 1659 s = 0
1673 1660
1674 1661 # is the wfile new since m1, and match m2?
1675 1662 if f not in m1:
1676 1663 t1 = self.wread(f)
1677 1664 t2 = self.file(f).read(m2[f])
1678 1665 if cmp(t1, t2) == 0:
1679 1666 n = m2[f]
1680 1667 del t1, t2
1681 1668
1682 1669 # are files different?
1683 1670 if n != m2[f]:
1684 1671 a = ma.get(f, nullid)
1685 1672 # are both different from the ancestor?
1686 1673 if n != a and m2[f] != a:
1687 1674 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1688 1675 # merge executable bits
1689 1676 # "if we changed or they changed, change in merge"
1690 1677 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1691 1678 mode = ((a^b) | (a^c)) ^ a
1692 1679 merge[f] = (m1.get(f, nullid), m2[f], mode)
1693 1680 s = 1
1694 1681 # are we clobbering?
1695 1682 # is remote's version newer?
1696 1683 # or are we going back in time?
1697 1684 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1698 1685 self.ui.debug(_(" remote %s is newer, get\n") % f)
1699 1686 get[f] = m2[f]
1700 1687 s = 1
1701 1688 elif f in umap or f in added:
1702 1689 # this unknown file is the same as the checkout
1703 1690 # we need to reset the dirstate if the file was added
1704 1691 get[f] = m2[f]
1705 1692
1706 1693 if not s and mfw[f] != mf2[f]:
1707 1694 if force:
1708 1695 self.ui.debug(_(" updating permissions for %s\n") % f)
1709 1696 util.set_exec(self.wjoin(f), mf2[f])
1710 1697 else:
1711 1698 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1712 1699 mode = ((a^b) | (a^c)) ^ a
1713 1700 if mode != b:
1714 1701 self.ui.debug(_(" updating permissions for %s\n")
1715 1702 % f)
1716 1703 util.set_exec(self.wjoin(f), mode)
1717 1704 del m2[f]
1718 1705 elif f in ma:
1719 1706 if n != ma[f]:
1720 1707 r = _("d")
1721 1708 if not force and (linear_path or allow):
1722 1709 r = self.ui.prompt(
1723 1710 (_(" local changed %s which remote deleted\n") % f) +
1724 1711 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1725 1712 if r == _("d"):
1726 1713 remove.append(f)
1727 1714 else:
1728 1715 self.ui.debug(_("other deleted %s\n") % f)
1729 1716 remove.append(f) # other deleted it
1730 1717 else:
1731 1718 # file is created on branch or in working directory
1732 1719 if force and f not in umap:
1733 1720 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1734 1721 remove.append(f)
1735 1722 elif n == m1.get(f, nullid): # same as parent
1736 1723 if p2 == pa: # going backwards?
1737 1724 self.ui.debug(_("remote deleted %s\n") % f)
1738 1725 remove.append(f)
1739 1726 else:
1740 1727 self.ui.debug(_("local modified %s, keeping\n") % f)
1741 1728 else:
1742 1729 self.ui.debug(_("working dir created %s, keeping\n") % f)
1743 1730
1744 1731 for f, n in m2.iteritems():
1745 1732 if choose and not choose(f):
1746 1733 continue
1747 1734 if f[0] == "/":
1748 1735 continue
1749 1736 if f in ma and n != ma[f]:
1750 1737 r = _("k")
1751 1738 if not force and (linear_path or allow):
1752 1739 r = self.ui.prompt(
1753 1740 (_("remote changed %s which local deleted\n") % f) +
1754 1741 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1755 1742 if r == _("k"):
1756 1743 get[f] = n
1757 1744 elif f not in ma:
1758 1745 self.ui.debug(_("remote created %s\n") % f)
1759 1746 get[f] = n
1760 1747 else:
1761 1748 if force or p2 == pa: # going backwards?
1762 1749 self.ui.debug(_("local deleted %s, recreating\n") % f)
1763 1750 get[f] = n
1764 1751 else:
1765 1752 self.ui.debug(_("local deleted %s\n") % f)
1766 1753
1767 1754 del mw, m1, m2, ma
1768 1755
1769 1756 if force:
1770 1757 for f in merge:
1771 1758 get[f] = merge[f][1]
1772 1759 merge = {}
1773 1760
1774 1761 if linear_path or force:
1775 1762 # we don't need to do any magic, just jump to the new rev
1776 1763 branch_merge = False
1777 1764 p1, p2 = p2, nullid
1778 1765 else:
1779 1766 if not allow:
1780 1767 self.ui.status(_("this update spans a branch"
1781 1768 " affecting the following files:\n"))
1782 1769 fl = merge.keys() + get.keys()
1783 1770 fl.sort()
1784 1771 for f in fl:
1785 1772 cf = ""
1786 1773 if f in merge:
1787 1774 cf = _(" (resolve)")
1788 1775 self.ui.status(" %s%s\n" % (f, cf))
1789 1776 self.ui.warn(_("aborting update spanning branches!\n"))
1790 1777 self.ui.status(_("(use 'hg merge' to merge across branches"
1791 1778 " or 'hg update -C' to lose changes)\n"))
1792 1779 return 1
1793 1780 branch_merge = True
1794 1781
1795 1782 xp1 = hex(p1)
1796 1783 xp2 = hex(p2)
1797 1784 if p2 == nullid: xxp2 = ''
1798 1785 else: xxp2 = xp2
1799 1786
1800 1787 self.hook('preupdate', throw=True, parent1=xp1, parent2=xxp2)
1801 1788
1802 1789 # get the files we don't need to change
1803 1790 files = get.keys()
1804 1791 files.sort()
1805 1792 for f in files:
1806 1793 if f[0] == "/":
1807 1794 continue
1808 1795 self.ui.note(_("getting %s\n") % f)
1809 1796 t = self.file(f).read(get[f])
1810 1797 self.wwrite(f, t)
1811 1798 util.set_exec(self.wjoin(f), mf2[f])
1812 1799 if moddirstate:
1813 1800 if branch_merge:
1814 1801 self.dirstate.update([f], 'n', st_mtime=-1)
1815 1802 else:
1816 1803 self.dirstate.update([f], 'n')
1817 1804
1818 1805 # merge the tricky bits
1819 1806 failedmerge = []
1820 1807 files = merge.keys()
1821 1808 files.sort()
1822 1809 for f in files:
1823 1810 self.ui.status(_("merging %s\n") % f)
1824 1811 my, other, flag = merge[f]
1825 1812 ret = self.merge3(f, my, other, xp1, xp2)
1826 1813 if ret:
1827 1814 err = True
1828 1815 failedmerge.append(f)
1829 1816 util.set_exec(self.wjoin(f), flag)
1830 1817 if moddirstate:
1831 1818 if branch_merge:
1832 1819 # We've done a branch merge, mark this file as merged
1833 1820 # so that we properly record the merger later
1834 1821 self.dirstate.update([f], 'm')
1835 1822 else:
1836 1823 # We've update-merged a locally modified file, so
1837 1824 # we set the dirstate to emulate a normal checkout
1838 1825 # of that file some time in the past. Thus our
1839 1826 # merge will appear as a normal local file
1840 1827 # modification.
1841 1828 f_len = len(self.file(f).read(other))
1842 1829 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1843 1830
1844 1831 remove.sort()
1845 1832 for f in remove:
1846 1833 self.ui.note(_("removing %s\n") % f)
1847 1834 util.audit_path(f)
1848 1835 try:
1849 1836 util.unlink(self.wjoin(f))
1850 1837 except OSError, inst:
1851 1838 if inst.errno != errno.ENOENT:
1852 1839 self.ui.warn(_("update failed to remove %s: %s!\n") %
1853 1840 (f, inst.strerror))
1854 1841 if moddirstate:
1855 1842 if branch_merge:
1856 1843 self.dirstate.update(remove, 'r')
1857 1844 else:
1858 1845 self.dirstate.forget(remove)
1859 1846
1860 1847 if moddirstate:
1861 1848 self.dirstate.setparents(p1, p2)
1862 1849
1863 1850 if show_stats:
1864 1851 stats = ((len(get), _("updated")),
1865 1852 (len(merge) - len(failedmerge), _("merged")),
1866 1853 (len(remove), _("removed")),
1867 1854 (len(failedmerge), _("unresolved")))
1868 1855 note = ", ".join([_("%d files %s") % s for s in stats])
1869 1856 self.ui.status("%s\n" % note)
1870 1857 if moddirstate:
1871 1858 if branch_merge:
1872 1859 if failedmerge:
1873 1860 self.ui.status(_("There are unresolved merges,"
1874 1861 " you can redo the full merge using:\n"
1875 1862 " hg update -C %s\n"
1876 1863 " hg merge %s\n"
1877 1864 % (self.changelog.rev(p1),
1878 1865 self.changelog.rev(p2))))
1879 1866 else:
1880 1867 self.ui.status(_("(branch merge, don't forget to commit)\n"))
1881 1868 elif failedmerge:
1882 1869 self.ui.status(_("There are unresolved merges with"
1883 1870 " locally modified files.\n"))
1884 1871
1885 1872 self.hook('update', parent1=xp1, parent2=xxp2, error=int(err))
1886 1873 return err
1887 1874
1888 1875 def merge3(self, fn, my, other, p1, p2):
1889 1876 """perform a 3-way merge in the working directory"""
1890 1877
1891 1878 def temp(prefix, node):
1892 1879 pre = "%s~%s." % (os.path.basename(fn), prefix)
1893 1880 (fd, name) = tempfile.mkstemp(prefix=pre)
1894 1881 f = os.fdopen(fd, "wb")
1895 1882 self.wwrite(fn, fl.read(node), f)
1896 1883 f.close()
1897 1884 return name
1898 1885
1899 1886 fl = self.file(fn)
1900 1887 base = fl.ancestor(my, other)
1901 1888 a = self.wjoin(fn)
1902 1889 b = temp("base", base)
1903 1890 c = temp("other", other)
1904 1891
1905 1892 self.ui.note(_("resolving %s\n") % fn)
1906 1893 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1907 1894 (fn, short(my), short(other), short(base)))
1908 1895
1909 1896 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1910 1897 or "hgmerge")
1911 1898 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1912 1899 environ={'HG_FILE': fn,
1913 1900 'HG_MY_NODE': p1,
1914 1901 'HG_OTHER_NODE': p2,
1915 1902 'HG_FILE_MY_NODE': hex(my),
1916 1903 'HG_FILE_OTHER_NODE': hex(other),
1917 1904 'HG_FILE_BASE_NODE': hex(base)})
1918 1905 if r:
1919 1906 self.ui.warn(_("merging %s failed!\n") % fn)
1920 1907
1921 1908 os.unlink(b)
1922 1909 os.unlink(c)
1923 1910 return r
1924 1911
1925 1912 def verify(self):
1926 1913 filelinkrevs = {}
1927 1914 filenodes = {}
1928 1915 changesets = revisions = files = 0
1929 1916 errors = [0]
1930 1917 warnings = [0]
1931 1918 neededmanifests = {}
1932 1919
1933 1920 def err(msg):
1934 1921 self.ui.warn(msg + "\n")
1935 1922 errors[0] += 1
1936 1923
1937 1924 def warn(msg):
1938 1925 self.ui.warn(msg + "\n")
1939 1926 warnings[0] += 1
1940 1927
1941 1928 def checksize(obj, name):
1942 1929 d = obj.checksize()
1943 1930 if d[0]:
1944 1931 err(_("%s data length off by %d bytes") % (name, d[0]))
1945 1932 if d[1]:
1946 1933 err(_("%s index contains %d extra bytes") % (name, d[1]))
1947 1934
1948 1935 def checkversion(obj, name):
1949 1936 if obj.version != revlog.REVLOGV0:
1950 1937 if not revlogv1:
1951 1938 warn(_("warning: `%s' uses revlog format 1") % name)
1952 1939 elif revlogv1:
1953 1940 warn(_("warning: `%s' uses revlog format 0") % name)
1954 1941
1955 1942 revlogv1 = self.revlogversion != revlog.REVLOGV0
1956 1943 if self.ui.verbose or revlogv1 != self.revlogv1:
1957 1944 self.ui.status(_("repository uses revlog format %d\n") %
1958 1945 (revlogv1 and 1 or 0))
1959 1946
1960 1947 seen = {}
1961 1948 self.ui.status(_("checking changesets\n"))
1962 1949 checksize(self.changelog, "changelog")
1963 1950
1964 1951 for i in range(self.changelog.count()):
1965 1952 changesets += 1
1966 1953 n = self.changelog.node(i)
1967 1954 l = self.changelog.linkrev(n)
1968 1955 if l != i:
1969 1956 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1970 1957 if n in seen:
1971 1958 err(_("duplicate changeset at revision %d") % i)
1972 1959 seen[n] = 1
1973 1960
1974 1961 for p in self.changelog.parents(n):
1975 1962 if p not in self.changelog.nodemap:
1976 1963 err(_("changeset %s has unknown parent %s") %
1977 1964 (short(n), short(p)))
1978 1965 try:
1979 1966 changes = self.changelog.read(n)
1980 1967 except KeyboardInterrupt:
1981 1968 self.ui.warn(_("interrupted"))
1982 1969 raise
1983 1970 except Exception, inst:
1984 1971 err(_("unpacking changeset %s: %s") % (short(n), inst))
1985 1972 continue
1986 1973
1987 1974 neededmanifests[changes[0]] = n
1988 1975
1989 1976 for f in changes[3]:
1990 1977 filelinkrevs.setdefault(f, []).append(i)
1991 1978
1992 1979 seen = {}
1993 1980 self.ui.status(_("checking manifests\n"))
1994 1981 checkversion(self.manifest, "manifest")
1995 1982 checksize(self.manifest, "manifest")
1996 1983
1997 1984 for i in range(self.manifest.count()):
1998 1985 n = self.manifest.node(i)
1999 1986 l = self.manifest.linkrev(n)
2000 1987
2001 1988 if l < 0 or l >= self.changelog.count():
2002 1989 err(_("bad manifest link (%d) at revision %d") % (l, i))
2003 1990
2004 1991 if n in neededmanifests:
2005 1992 del neededmanifests[n]
2006 1993
2007 1994 if n in seen:
2008 1995 err(_("duplicate manifest at revision %d") % i)
2009 1996
2010 1997 seen[n] = 1
2011 1998
2012 1999 for p in self.manifest.parents(n):
2013 2000 if p not in self.manifest.nodemap:
2014 2001 err(_("manifest %s has unknown parent %s") %
2015 2002 (short(n), short(p)))
2016 2003
2017 2004 try:
2018 2005 delta = mdiff.patchtext(self.manifest.delta(n))
2019 2006 except KeyboardInterrupt:
2020 2007 self.ui.warn(_("interrupted"))
2021 2008 raise
2022 2009 except Exception, inst:
2023 2010 err(_("unpacking manifest %s: %s") % (short(n), inst))
2024 2011 continue
2025 2012
2026 2013 try:
2027 2014 ff = [ l.split('\0') for l in delta.splitlines() ]
2028 2015 for f, fn in ff:
2029 2016 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
2030 2017 except (ValueError, TypeError), inst:
2031 2018 err(_("broken delta in manifest %s: %s") % (short(n), inst))
2032 2019
2033 2020 self.ui.status(_("crosschecking files in changesets and manifests\n"))
2034 2021
2035 2022 for m, c in neededmanifests.items():
2036 2023 err(_("Changeset %s refers to unknown manifest %s") %
2037 2024 (short(m), short(c)))
2038 2025 del neededmanifests
2039 2026
2040 2027 for f in filenodes:
2041 2028 if f not in filelinkrevs:
2042 2029 err(_("file %s in manifest but not in changesets") % f)
2043 2030
2044 2031 for f in filelinkrevs:
2045 2032 if f not in filenodes:
2046 2033 err(_("file %s in changeset but not in manifest") % f)
2047 2034
2048 2035 self.ui.status(_("checking files\n"))
2049 2036 ff = filenodes.keys()
2050 2037 ff.sort()
2051 2038 for f in ff:
2052 2039 if f == "/dev/null":
2053 2040 continue
2054 2041 files += 1
2055 2042 if not f:
2056 2043 err(_("file without name in manifest %s") % short(n))
2057 2044 continue
2058 2045 fl = self.file(f)
2059 2046 checkversion(fl, f)
2060 2047 checksize(fl, f)
2061 2048
2062 2049 nodes = {nullid: 1}
2063 2050 seen = {}
2064 2051 for i in range(fl.count()):
2065 2052 revisions += 1
2066 2053 n = fl.node(i)
2067 2054
2068 2055 if n in seen:
2069 2056 err(_("%s: duplicate revision %d") % (f, i))
2070 2057 if n not in filenodes[f]:
2071 2058 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
2072 2059 else:
2073 2060 del filenodes[f][n]
2074 2061
2075 2062 flr = fl.linkrev(n)
2076 2063 if flr not in filelinkrevs.get(f, []):
2077 2064 err(_("%s:%s points to unexpected changeset %d")
2078 2065 % (f, short(n), flr))
2079 2066 else:
2080 2067 filelinkrevs[f].remove(flr)
2081 2068
2082 2069 # verify contents
2083 2070 try:
2084 2071 t = fl.read(n)
2085 2072 except KeyboardInterrupt:
2086 2073 self.ui.warn(_("interrupted"))
2087 2074 raise
2088 2075 except Exception, inst:
2089 2076 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
2090 2077
2091 2078 # verify parents
2092 2079 (p1, p2) = fl.parents(n)
2093 2080 if p1 not in nodes:
2094 2081 err(_("file %s:%s unknown parent 1 %s") %
2095 2082 (f, short(n), short(p1)))
2096 2083 if p2 not in nodes:
2097 2084 err(_("file %s:%s unknown parent 2 %s") %
2098 2085 (f, short(n), short(p1)))
2099 2086 nodes[n] = 1
2100 2087
2101 2088 # cross-check
2102 2089 for node in filenodes[f]:
2103 2090 err(_("node %s in manifests not in %s") % (hex(node), f))
2104 2091
2105 2092 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
2106 2093 (files, changesets, revisions))
2107 2094
2108 2095 if warnings[0]:
2109 2096 self.ui.warn(_("%d warnings encountered!\n") % warnings[0])
2110 2097 if errors[0]:
2111 2098 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
2112 2099 return 1
2113 2100
2114 2101 # used to avoid circular references so destructors work
2115 2102 def aftertrans(base):
2116 2103 p = base
2117 2104 def a():
2118 2105 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
2119 2106 util.rename(os.path.join(p, "journal.dirstate"),
2120 2107 os.path.join(p, "undo.dirstate"))
2121 2108 return a
2122 2109
General Comments 0
You need to be logged in to leave comments. Login now