##// END OF EJS Templates
Fix hg push and hg push -r sometimes creating new heads without --force....
Thomas Arendsen Hein -
r2021:fc22ed56 default
parent child Browse files
Show More
@@ -1,1923 +1,1949 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import os, util
9 9 import filelog, manifest, changelog, dirstate, repo
10 10 from node import *
11 11 from i18n import gettext as _
12 12 from demandload import *
13 13 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 14 demandload(globals(), "appendfile changegroup")
15 15
16 16 class localrepository(object):
17 17 def __del__(self):
18 18 self.transhandle = None
19 19 def __init__(self, parentui, path=None, create=0):
20 20 if not path:
21 21 p = os.getcwd()
22 22 while not os.path.isdir(os.path.join(p, ".hg")):
23 23 oldp = p
24 24 p = os.path.dirname(p)
25 25 if p == oldp:
26 26 raise repo.RepoError(_("no repo found"))
27 27 path = p
28 28 self.path = os.path.join(path, ".hg")
29 29
30 30 if not create and not os.path.isdir(self.path):
31 31 raise repo.RepoError(_("repository %s not found") % path)
32 32
33 33 self.root = os.path.abspath(path)
34 34 self.origroot = path
35 35 self.ui = ui.ui(parentui=parentui)
36 36 self.opener = util.opener(self.path)
37 37 self.wopener = util.opener(self.root)
38 38 self.manifest = manifest.manifest(self.opener)
39 39 self.changelog = changelog.changelog(self.opener)
40 40 self.tagscache = None
41 41 self.nodetagscache = None
42 42 self.encodepats = None
43 43 self.decodepats = None
44 44 self.transhandle = None
45 45
46 46 if create:
47 47 os.mkdir(self.path)
48 48 os.mkdir(self.join("data"))
49 49
50 50 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
51 51 try:
52 52 self.ui.readconfig(self.join("hgrc"), self.root)
53 53 except IOError:
54 54 pass
55 55
56 56 def hook(self, name, throw=False, **args):
57 57 def runhook(name, cmd):
58 58 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
59 59 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()] +
60 60 [(k.upper(), v) for k, v in args.iteritems()])
61 61 r = util.system(cmd, environ=env, cwd=self.root)
62 62 if r:
63 63 desc, r = util.explain_exit(r)
64 64 if throw:
65 65 raise util.Abort(_('%s hook %s') % (name, desc))
66 66 self.ui.warn(_('error: %s hook %s\n') % (name, desc))
67 67 return False
68 68 return True
69 69
70 70 r = True
71 71 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
72 72 if hname.split(".", 1)[0] == name and cmd]
73 73 hooks.sort()
74 74 for hname, cmd in hooks:
75 75 r = runhook(hname, cmd) and r
76 76 return r
77 77
78 78 def tags(self):
79 79 '''return a mapping of tag to node'''
80 80 if not self.tagscache:
81 81 self.tagscache = {}
82 82
83 83 def parsetag(line, context):
84 84 if not line:
85 85 return
86 86 s = l.split(" ", 1)
87 87 if len(s) != 2:
88 88 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
89 89 return
90 90 node, key = s
91 91 try:
92 92 bin_n = bin(node)
93 93 except TypeError:
94 94 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
95 95 return
96 96 if bin_n not in self.changelog.nodemap:
97 97 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
98 98 return
99 99 self.tagscache[key.strip()] = bin_n
100 100
101 101 # read each head of the tags file, ending with the tip
102 102 # and add each tag found to the map, with "newer" ones
103 103 # taking precedence
104 104 fl = self.file(".hgtags")
105 105 h = fl.heads()
106 106 h.reverse()
107 107 for r in h:
108 108 count = 0
109 109 for l in fl.read(r).splitlines():
110 110 count += 1
111 111 parsetag(l, ".hgtags:%d" % count)
112 112
113 113 try:
114 114 f = self.opener("localtags")
115 115 count = 0
116 116 for l in f:
117 117 count += 1
118 118 parsetag(l, "localtags:%d" % count)
119 119 except IOError:
120 120 pass
121 121
122 122 self.tagscache['tip'] = self.changelog.tip()
123 123
124 124 return self.tagscache
125 125
126 126 def tagslist(self):
127 127 '''return a list of tags ordered by revision'''
128 128 l = []
129 129 for t, n in self.tags().items():
130 130 try:
131 131 r = self.changelog.rev(n)
132 132 except:
133 133 r = -2 # sort to the beginning of the list if unknown
134 134 l.append((r, t, n))
135 135 l.sort()
136 136 return [(t, n) for r, t, n in l]
137 137
138 138 def nodetags(self, node):
139 139 '''return the tags associated with a node'''
140 140 if not self.nodetagscache:
141 141 self.nodetagscache = {}
142 142 for t, n in self.tags().items():
143 143 self.nodetagscache.setdefault(n, []).append(t)
144 144 return self.nodetagscache.get(node, [])
145 145
146 146 def lookup(self, key):
147 147 try:
148 148 return self.tags()[key]
149 149 except KeyError:
150 150 try:
151 151 return self.changelog.lookup(key)
152 152 except:
153 153 raise repo.RepoError(_("unknown revision '%s'") % key)
154 154
155 155 def dev(self):
156 156 return os.stat(self.path).st_dev
157 157
158 158 def local(self):
159 159 return True
160 160
161 161 def join(self, f):
162 162 return os.path.join(self.path, f)
163 163
164 164 def wjoin(self, f):
165 165 return os.path.join(self.root, f)
166 166
167 167 def file(self, f):
168 168 if f[0] == '/':
169 169 f = f[1:]
170 170 return filelog.filelog(self.opener, f)
171 171
172 172 def getcwd(self):
173 173 return self.dirstate.getcwd()
174 174
175 175 def wfile(self, f, mode='r'):
176 176 return self.wopener(f, mode)
177 177
178 178 def wread(self, filename):
179 179 if self.encodepats == None:
180 180 l = []
181 181 for pat, cmd in self.ui.configitems("encode"):
182 182 mf = util.matcher(self.root, "", [pat], [], [])[1]
183 183 l.append((mf, cmd))
184 184 self.encodepats = l
185 185
186 186 data = self.wopener(filename, 'r').read()
187 187
188 188 for mf, cmd in self.encodepats:
189 189 if mf(filename):
190 190 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
191 191 data = util.filter(data, cmd)
192 192 break
193 193
194 194 return data
195 195
196 196 def wwrite(self, filename, data, fd=None):
197 197 if self.decodepats == None:
198 198 l = []
199 199 for pat, cmd in self.ui.configitems("decode"):
200 200 mf = util.matcher(self.root, "", [pat], [], [])[1]
201 201 l.append((mf, cmd))
202 202 self.decodepats = l
203 203
204 204 for mf, cmd in self.decodepats:
205 205 if mf(filename):
206 206 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
207 207 data = util.filter(data, cmd)
208 208 break
209 209
210 210 if fd:
211 211 return fd.write(data)
212 212 return self.wopener(filename, 'w').write(data)
213 213
214 214 def transaction(self):
215 215 tr = self.transhandle
216 216 if tr != None and tr.running():
217 217 return tr.nest()
218 218
219 219 # save dirstate for undo
220 220 try:
221 221 ds = self.opener("dirstate").read()
222 222 except IOError:
223 223 ds = ""
224 224 self.opener("journal.dirstate", "w").write(ds)
225 225
226 226 tr = transaction.transaction(self.ui.warn, self.opener,
227 227 self.join("journal"),
228 228 aftertrans(self.path))
229 229 self.transhandle = tr
230 230 return tr
231 231
232 232 def recover(self):
233 233 l = self.lock()
234 234 if os.path.exists(self.join("journal")):
235 235 self.ui.status(_("rolling back interrupted transaction\n"))
236 236 transaction.rollback(self.opener, self.join("journal"))
237 237 self.reload()
238 238 return True
239 239 else:
240 240 self.ui.warn(_("no interrupted transaction available\n"))
241 241 return False
242 242
243 243 def undo(self, wlock=None):
244 244 if not wlock:
245 245 wlock = self.wlock()
246 246 l = self.lock()
247 247 if os.path.exists(self.join("undo")):
248 248 self.ui.status(_("rolling back last transaction\n"))
249 249 transaction.rollback(self.opener, self.join("undo"))
250 250 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
251 251 self.reload()
252 252 self.wreload()
253 253 else:
254 254 self.ui.warn(_("no undo information available\n"))
255 255
256 256 def wreload(self):
257 257 self.dirstate.read()
258 258
259 259 def reload(self):
260 260 self.changelog.load()
261 261 self.manifest.load()
262 262 self.tagscache = None
263 263 self.nodetagscache = None
264 264
265 265 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
266 266 desc=None):
267 267 try:
268 268 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
269 269 except lock.LockHeld, inst:
270 270 if not wait:
271 271 raise
272 272 self.ui.warn(_("waiting for lock on %s held by %s\n") %
273 273 (desc, inst.args[0]))
274 274 # default to 600 seconds timeout
275 275 l = lock.lock(self.join(lockname),
276 276 int(self.ui.config("ui", "timeout") or 600),
277 277 releasefn, desc=desc)
278 278 if acquirefn:
279 279 acquirefn()
280 280 return l
281 281
282 282 def lock(self, wait=1):
283 283 return self.do_lock("lock", wait, acquirefn=self.reload,
284 284 desc=_('repository %s') % self.origroot)
285 285
286 286 def wlock(self, wait=1):
287 287 return self.do_lock("wlock", wait, self.dirstate.write,
288 288 self.wreload,
289 289 desc=_('working directory of %s') % self.origroot)
290 290
291 291 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
292 292 "determine whether a new filenode is needed"
293 293 fp1 = manifest1.get(filename, nullid)
294 294 fp2 = manifest2.get(filename, nullid)
295 295
296 296 if fp2 != nullid:
297 297 # is one parent an ancestor of the other?
298 298 fpa = filelog.ancestor(fp1, fp2)
299 299 if fpa == fp1:
300 300 fp1, fp2 = fp2, nullid
301 301 elif fpa == fp2:
302 302 fp2 = nullid
303 303
304 304 # is the file unmodified from the parent? report existing entry
305 305 if fp2 == nullid and text == filelog.read(fp1):
306 306 return (fp1, None, None)
307 307
308 308 return (None, fp1, fp2)
309 309
310 310 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
311 311 orig_parent = self.dirstate.parents()[0] or nullid
312 312 p1 = p1 or self.dirstate.parents()[0] or nullid
313 313 p2 = p2 or self.dirstate.parents()[1] or nullid
314 314 c1 = self.changelog.read(p1)
315 315 c2 = self.changelog.read(p2)
316 316 m1 = self.manifest.read(c1[0])
317 317 mf1 = self.manifest.readflags(c1[0])
318 318 m2 = self.manifest.read(c2[0])
319 319 changed = []
320 320
321 321 if orig_parent == p1:
322 322 update_dirstate = 1
323 323 else:
324 324 update_dirstate = 0
325 325
326 326 if not wlock:
327 327 wlock = self.wlock()
328 328 l = self.lock()
329 329 tr = self.transaction()
330 330 mm = m1.copy()
331 331 mfm = mf1.copy()
332 332 linkrev = self.changelog.count()
333 333 for f in files:
334 334 try:
335 335 t = self.wread(f)
336 336 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
337 337 r = self.file(f)
338 338 mfm[f] = tm
339 339
340 340 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
341 341 if entry:
342 342 mm[f] = entry
343 343 continue
344 344
345 345 mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
346 346 changed.append(f)
347 347 if update_dirstate:
348 348 self.dirstate.update([f], "n")
349 349 except IOError:
350 350 try:
351 351 del mm[f]
352 352 del mfm[f]
353 353 if update_dirstate:
354 354 self.dirstate.forget([f])
355 355 except:
356 356 # deleted from p2?
357 357 pass
358 358
359 359 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
360 360 user = user or self.ui.username()
361 361 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
362 362 tr.close()
363 363 if update_dirstate:
364 364 self.dirstate.setparents(n, nullid)
365 365
366 366 def commit(self, files=None, text="", user=None, date=None,
367 367 match=util.always, force=False, lock=None, wlock=None):
368 368 commit = []
369 369 remove = []
370 370 changed = []
371 371
372 372 if files:
373 373 for f in files:
374 374 s = self.dirstate.state(f)
375 375 if s in 'nmai':
376 376 commit.append(f)
377 377 elif s == 'r':
378 378 remove.append(f)
379 379 else:
380 380 self.ui.warn(_("%s not tracked!\n") % f)
381 381 else:
382 382 modified, added, removed, deleted, unknown = self.changes(match=match)
383 383 commit = modified + added
384 384 remove = removed
385 385
386 386 p1, p2 = self.dirstate.parents()
387 387 c1 = self.changelog.read(p1)
388 388 c2 = self.changelog.read(p2)
389 389 m1 = self.manifest.read(c1[0])
390 390 mf1 = self.manifest.readflags(c1[0])
391 391 m2 = self.manifest.read(c2[0])
392 392
393 393 if not commit and not remove and not force and p2 == nullid:
394 394 self.ui.status(_("nothing changed\n"))
395 395 return None
396 396
397 397 xp1 = hex(p1)
398 398 if p2 == nullid: xp2 = ''
399 399 else: xp2 = hex(p2)
400 400
401 401 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
402 402
403 403 if not wlock:
404 404 wlock = self.wlock()
405 405 if not lock:
406 406 lock = self.lock()
407 407 tr = self.transaction()
408 408
409 409 # check in files
410 410 new = {}
411 411 linkrev = self.changelog.count()
412 412 commit.sort()
413 413 for f in commit:
414 414 self.ui.note(f + "\n")
415 415 try:
416 416 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
417 417 t = self.wread(f)
418 418 except IOError:
419 419 self.ui.warn(_("trouble committing %s!\n") % f)
420 420 raise
421 421
422 422 r = self.file(f)
423 423
424 424 meta = {}
425 425 cp = self.dirstate.copied(f)
426 426 if cp:
427 427 meta["copy"] = cp
428 428 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
429 429 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
430 430 fp1, fp2 = nullid, nullid
431 431 else:
432 432 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
433 433 if entry:
434 434 new[f] = entry
435 435 continue
436 436
437 437 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
438 438 # remember what we've added so that we can later calculate
439 439 # the files to pull from a set of changesets
440 440 changed.append(f)
441 441
442 442 # update manifest
443 443 m1 = m1.copy()
444 444 m1.update(new)
445 445 for f in remove:
446 446 if f in m1:
447 447 del m1[f]
448 448 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
449 449 (new, remove))
450 450
451 451 # add changeset
452 452 new = new.keys()
453 453 new.sort()
454 454
455 455 user = user or self.ui.username()
456 456 if not text:
457 457 edittext = [""]
458 458 if p2 != nullid:
459 459 edittext.append("HG: branch merge")
460 460 edittext.extend(["HG: changed %s" % f for f in changed])
461 461 edittext.extend(["HG: removed %s" % f for f in remove])
462 462 if not changed and not remove:
463 463 edittext.append("HG: no files changed")
464 464 edittext.append("")
465 465 # run editor in the repository root
466 466 olddir = os.getcwd()
467 467 os.chdir(self.root)
468 468 edittext = self.ui.edit("\n".join(edittext), user)
469 469 os.chdir(olddir)
470 470 if not edittext.rstrip():
471 471 return None
472 472 text = edittext
473 473
474 474 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
475 475 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
476 476 parent2=xp2)
477 477 tr.close()
478 478
479 479 self.dirstate.setparents(n)
480 480 self.dirstate.update(new, "n")
481 481 self.dirstate.forget(remove)
482 482
483 483 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
484 484 return n
485 485
486 486 def walk(self, node=None, files=[], match=util.always):
487 487 if node:
488 488 fdict = dict.fromkeys(files)
489 489 for fn in self.manifest.read(self.changelog.read(node)[0]):
490 490 fdict.pop(fn, None)
491 491 if match(fn):
492 492 yield 'm', fn
493 493 for fn in fdict:
494 494 self.ui.warn(_('%s: No such file in rev %s\n') % (
495 495 util.pathto(self.getcwd(), fn), short(node)))
496 496 else:
497 497 for src, fn in self.dirstate.walk(files, match):
498 498 yield src, fn
499 499
500 500 def changes(self, node1=None, node2=None, files=[], match=util.always,
501 501 wlock=None):
502 502 """return changes between two nodes or node and working directory
503 503
504 504 If node1 is None, use the first dirstate parent instead.
505 505 If node2 is None, compare node1 with working directory.
506 506 """
507 507
508 508 def fcmp(fn, mf):
509 509 t1 = self.wread(fn)
510 510 t2 = self.file(fn).read(mf.get(fn, nullid))
511 511 return cmp(t1, t2)
512 512
513 513 def mfmatches(node):
514 514 change = self.changelog.read(node)
515 515 mf = dict(self.manifest.read(change[0]))
516 516 for fn in mf.keys():
517 517 if not match(fn):
518 518 del mf[fn]
519 519 return mf
520 520
521 521 if node1:
522 522 # read the manifest from node1 before the manifest from node2,
523 523 # so that we'll hit the manifest cache if we're going through
524 524 # all the revisions in parent->child order.
525 525 mf1 = mfmatches(node1)
526 526
527 527 # are we comparing the working directory?
528 528 if not node2:
529 529 if not wlock:
530 530 try:
531 531 wlock = self.wlock(wait=0)
532 532 except lock.LockException:
533 533 wlock = None
534 534 lookup, modified, added, removed, deleted, unknown = (
535 535 self.dirstate.changes(files, match))
536 536
537 537 # are we comparing working dir against its parent?
538 538 if not node1:
539 539 if lookup:
540 540 # do a full compare of any files that might have changed
541 541 mf2 = mfmatches(self.dirstate.parents()[0])
542 542 for f in lookup:
543 543 if fcmp(f, mf2):
544 544 modified.append(f)
545 545 elif wlock is not None:
546 546 self.dirstate.update([f], "n")
547 547 else:
548 548 # we are comparing working dir against non-parent
549 549 # generate a pseudo-manifest for the working dir
550 550 mf2 = mfmatches(self.dirstate.parents()[0])
551 551 for f in lookup + modified + added:
552 552 mf2[f] = ""
553 553 for f in removed:
554 554 if f in mf2:
555 555 del mf2[f]
556 556 else:
557 557 # we are comparing two revisions
558 558 deleted, unknown = [], []
559 559 mf2 = mfmatches(node2)
560 560
561 561 if node1:
562 562 # flush lists from dirstate before comparing manifests
563 563 modified, added = [], []
564 564
565 565 for fn in mf2:
566 566 if mf1.has_key(fn):
567 567 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
568 568 modified.append(fn)
569 569 del mf1[fn]
570 570 else:
571 571 added.append(fn)
572 572
573 573 removed = mf1.keys()
574 574
575 575 # sort and return results:
576 576 for l in modified, added, removed, deleted, unknown:
577 577 l.sort()
578 578 return (modified, added, removed, deleted, unknown)
579 579
580 580 def add(self, list, wlock=None):
581 581 if not wlock:
582 582 wlock = self.wlock()
583 583 for f in list:
584 584 p = self.wjoin(f)
585 585 if not os.path.exists(p):
586 586 self.ui.warn(_("%s does not exist!\n") % f)
587 587 elif not os.path.isfile(p):
588 588 self.ui.warn(_("%s not added: only files supported currently\n")
589 589 % f)
590 590 elif self.dirstate.state(f) in 'an':
591 591 self.ui.warn(_("%s already tracked!\n") % f)
592 592 else:
593 593 self.dirstate.update([f], "a")
594 594
595 595 def forget(self, list, wlock=None):
596 596 if not wlock:
597 597 wlock = self.wlock()
598 598 for f in list:
599 599 if self.dirstate.state(f) not in 'ai':
600 600 self.ui.warn(_("%s not added!\n") % f)
601 601 else:
602 602 self.dirstate.forget([f])
603 603
604 604 def remove(self, list, unlink=False, wlock=None):
605 605 if unlink:
606 606 for f in list:
607 607 try:
608 608 util.unlink(self.wjoin(f))
609 609 except OSError, inst:
610 610 if inst.errno != errno.ENOENT:
611 611 raise
612 612 if not wlock:
613 613 wlock = self.wlock()
614 614 for f in list:
615 615 p = self.wjoin(f)
616 616 if os.path.exists(p):
617 617 self.ui.warn(_("%s still exists!\n") % f)
618 618 elif self.dirstate.state(f) == 'a':
619 619 self.dirstate.forget([f])
620 620 elif f not in self.dirstate:
621 621 self.ui.warn(_("%s not tracked!\n") % f)
622 622 else:
623 623 self.dirstate.update([f], "r")
624 624
625 625 def undelete(self, list, wlock=None):
626 626 p = self.dirstate.parents()[0]
627 627 mn = self.changelog.read(p)[0]
628 628 mf = self.manifest.readflags(mn)
629 629 m = self.manifest.read(mn)
630 630 if not wlock:
631 631 wlock = self.wlock()
632 632 for f in list:
633 633 if self.dirstate.state(f) not in "r":
634 634 self.ui.warn("%s not removed!\n" % f)
635 635 else:
636 636 t = self.file(f).read(m[f])
637 637 self.wwrite(f, t)
638 638 util.set_exec(self.wjoin(f), mf[f])
639 639 self.dirstate.update([f], "n")
640 640
641 641 def copy(self, source, dest, wlock=None):
642 642 p = self.wjoin(dest)
643 643 if not os.path.exists(p):
644 644 self.ui.warn(_("%s does not exist!\n") % dest)
645 645 elif not os.path.isfile(p):
646 646 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
647 647 else:
648 648 if not wlock:
649 649 wlock = self.wlock()
650 650 if self.dirstate.state(dest) == '?':
651 651 self.dirstate.update([dest], "a")
652 652 self.dirstate.copy(source, dest)
653 653
654 654 def heads(self, start=None):
655 655 heads = self.changelog.heads(start)
656 656 # sort the output in rev descending order
657 657 heads = [(-self.changelog.rev(h), h) for h in heads]
658 658 heads.sort()
659 659 return [n for (r, n) in heads]
660 660
661 661 # branchlookup returns a dict giving a list of branches for
662 662 # each head. A branch is defined as the tag of a node or
663 663 # the branch of the node's parents. If a node has multiple
664 664 # branch tags, tags are eliminated if they are visible from other
665 665 # branch tags.
666 666 #
667 667 # So, for this graph: a->b->c->d->e
668 668 # \ /
669 669 # aa -----/
670 670 # a has tag 2.6.12
671 671 # d has tag 2.6.13
672 672 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
673 673 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
674 674 # from the list.
675 675 #
676 676 # It is possible that more than one head will have the same branch tag.
677 677 # callers need to check the result for multiple heads under the same
678 678 # branch tag if that is a problem for them (ie checkout of a specific
679 679 # branch).
680 680 #
681 681 # passing in a specific branch will limit the depth of the search
682 682 # through the parents. It won't limit the branches returned in the
683 683 # result though.
684 684 def branchlookup(self, heads=None, branch=None):
685 685 if not heads:
686 686 heads = self.heads()
687 687 headt = [ h for h in heads ]
688 688 chlog = self.changelog
689 689 branches = {}
690 690 merges = []
691 691 seenmerge = {}
692 692
693 693 # traverse the tree once for each head, recording in the branches
694 694 # dict which tags are visible from this head. The branches
695 695 # dict also records which tags are visible from each tag
696 696 # while we traverse.
697 697 while headt or merges:
698 698 if merges:
699 699 n, found = merges.pop()
700 700 visit = [n]
701 701 else:
702 702 h = headt.pop()
703 703 visit = [h]
704 704 found = [h]
705 705 seen = {}
706 706 while visit:
707 707 n = visit.pop()
708 708 if n in seen:
709 709 continue
710 710 pp = chlog.parents(n)
711 711 tags = self.nodetags(n)
712 712 if tags:
713 713 for x in tags:
714 714 if x == 'tip':
715 715 continue
716 716 for f in found:
717 717 branches.setdefault(f, {})[n] = 1
718 718 branches.setdefault(n, {})[n] = 1
719 719 break
720 720 if n not in found:
721 721 found.append(n)
722 722 if branch in tags:
723 723 continue
724 724 seen[n] = 1
725 725 if pp[1] != nullid and n not in seenmerge:
726 726 merges.append((pp[1], [x for x in found]))
727 727 seenmerge[n] = 1
728 728 if pp[0] != nullid:
729 729 visit.append(pp[0])
730 730 # traverse the branches dict, eliminating branch tags from each
731 731 # head that are visible from another branch tag for that head.
732 732 out = {}
733 733 viscache = {}
734 734 for h in heads:
735 735 def visible(node):
736 736 if node in viscache:
737 737 return viscache[node]
738 738 ret = {}
739 739 visit = [node]
740 740 while visit:
741 741 x = visit.pop()
742 742 if x in viscache:
743 743 ret.update(viscache[x])
744 744 elif x not in ret:
745 745 ret[x] = 1
746 746 if x in branches:
747 747 visit[len(visit):] = branches[x].keys()
748 748 viscache[node] = ret
749 749 return ret
750 750 if h not in branches:
751 751 continue
752 752 # O(n^2), but somewhat limited. This only searches the
753 753 # tags visible from a specific head, not all the tags in the
754 754 # whole repo.
755 755 for b in branches[h]:
756 756 vis = False
757 757 for bb in branches[h].keys():
758 758 if b != bb:
759 759 if b in visible(bb):
760 760 vis = True
761 761 break
762 762 if not vis:
763 763 l = out.setdefault(h, [])
764 764 l[len(l):] = self.nodetags(b)
765 765 return out
766 766
767 767 def branches(self, nodes):
768 768 if not nodes:
769 769 nodes = [self.changelog.tip()]
770 770 b = []
771 771 for n in nodes:
772 772 t = n
773 773 while n:
774 774 p = self.changelog.parents(n)
775 775 if p[1] != nullid or p[0] == nullid:
776 776 b.append((t, n, p[0], p[1]))
777 777 break
778 778 n = p[0]
779 779 return b
780 780
781 781 def between(self, pairs):
782 782 r = []
783 783
784 784 for top, bottom in pairs:
785 785 n, l, i = top, [], 0
786 786 f = 1
787 787
788 788 while n != bottom:
789 789 p = self.changelog.parents(n)[0]
790 790 if i == f:
791 791 l.append(n)
792 792 f = f * 2
793 793 n = p
794 794 i += 1
795 795
796 796 r.append(l)
797 797
798 798 return r
799 799
800 800 def findincoming(self, remote, base=None, heads=None, force=False):
801 801 m = self.changelog.nodemap
802 802 search = []
803 803 fetch = {}
804 804 seen = {}
805 805 seenbranch = {}
806 806 if base == None:
807 807 base = {}
808 808
809 809 # assume we're closer to the tip than the root
810 810 # and start by examining the heads
811 811 self.ui.status(_("searching for changes\n"))
812 812
813 813 if not heads:
814 814 heads = remote.heads()
815 815
816 816 unknown = []
817 817 for h in heads:
818 818 if h not in m:
819 819 unknown.append(h)
820 820 else:
821 821 base[h] = 1
822 822
823 823 if not unknown:
824 824 return []
825 825
826 826 rep = {}
827 827 reqcnt = 0
828 828
829 829 # search through remote branches
830 830 # a 'branch' here is a linear segment of history, with four parts:
831 831 # head, root, first parent, second parent
832 832 # (a branch always has two parents (or none) by definition)
833 833 unknown = remote.branches(unknown)
834 834 while unknown:
835 835 r = []
836 836 while unknown:
837 837 n = unknown.pop(0)
838 838 if n[0] in seen:
839 839 continue
840 840
841 841 self.ui.debug(_("examining %s:%s\n")
842 842 % (short(n[0]), short(n[1])))
843 843 if n[0] == nullid:
844 844 break
845 845 if n in seenbranch:
846 846 self.ui.debug(_("branch already found\n"))
847 847 continue
848 848 if n[1] and n[1] in m: # do we know the base?
849 849 self.ui.debug(_("found incomplete branch %s:%s\n")
850 850 % (short(n[0]), short(n[1])))
851 851 search.append(n) # schedule branch range for scanning
852 852 seenbranch[n] = 1
853 853 else:
854 854 if n[1] not in seen and n[1] not in fetch:
855 855 if n[2] in m and n[3] in m:
856 856 self.ui.debug(_("found new changeset %s\n") %
857 857 short(n[1]))
858 858 fetch[n[1]] = 1 # earliest unknown
859 859 base[n[2]] = 1 # latest known
860 860 continue
861 861
862 862 for a in n[2:4]:
863 863 if a not in rep:
864 864 r.append(a)
865 865 rep[a] = 1
866 866
867 867 seen[n[0]] = 1
868 868
869 869 if r:
870 870 reqcnt += 1
871 871 self.ui.debug(_("request %d: %s\n") %
872 872 (reqcnt, " ".join(map(short, r))))
873 873 for p in range(0, len(r), 10):
874 874 for b in remote.branches(r[p:p+10]):
875 875 self.ui.debug(_("received %s:%s\n") %
876 876 (short(b[0]), short(b[1])))
877 877 if b[0] in m:
878 878 self.ui.debug(_("found base node %s\n")
879 879 % short(b[0]))
880 880 base[b[0]] = 1
881 881 elif b[0] not in seen:
882 882 unknown.append(b)
883 883
884 884 # do binary search on the branches we found
885 885 while search:
886 886 n = search.pop(0)
887 887 reqcnt += 1
888 888 l = remote.between([(n[0], n[1])])[0]
889 889 l.append(n[1])
890 890 p = n[0]
891 891 f = 1
892 892 for i in l:
893 893 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
894 894 if i in m:
895 895 if f <= 2:
896 896 self.ui.debug(_("found new branch changeset %s\n") %
897 897 short(p))
898 898 fetch[p] = 1
899 899 base[i] = 1
900 900 else:
901 901 self.ui.debug(_("narrowed branch search to %s:%s\n")
902 902 % (short(p), short(i)))
903 903 search.append((p, i))
904 904 break
905 905 p, f = i, f * 2
906 906
907 907 # sanity check our fetch list
908 908 for f in fetch.keys():
909 909 if f in m:
910 910 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
911 911
912 912 if base.keys() == [nullid]:
913 913 if force:
914 914 self.ui.warn(_("warning: repository is unrelated\n"))
915 915 else:
916 916 raise util.Abort(_("repository is unrelated"))
917 917
918 918 self.ui.note(_("found new changesets starting at ") +
919 919 " ".join([short(f) for f in fetch]) + "\n")
920 920
921 921 self.ui.debug(_("%d total queries\n") % reqcnt)
922 922
923 923 return fetch.keys()
924 924
925 925 def findoutgoing(self, remote, base=None, heads=None, force=False):
926 """Return list of nodes that are roots of subsets not in remote
927
928 If base dict is specified, assume that these nodes and their parents
929 exist on the remote side.
930 If a list of heads is specified, return only nodes which are heads
931 or ancestors of these heads, and return a second element which
932 contains all remote heads which get new children.
933 """
926 934 if base == None:
927 935 base = {}
928 936 self.findincoming(remote, base, heads, force=force)
929 937
930 938 self.ui.debug(_("common changesets up to ")
931 939 + " ".join(map(short, base.keys())) + "\n")
932 940
933 941 remain = dict.fromkeys(self.changelog.nodemap)
934 942
935 943 # prune everything remote has from the tree
936 944 del remain[nullid]
937 945 remove = base.keys()
938 946 while remove:
939 947 n = remove.pop(0)
940 948 if n in remain:
941 949 del remain[n]
942 950 for p in self.changelog.parents(n):
943 951 remove.append(p)
944 952
945 953 # find every node whose parents have been pruned
946 954 subset = []
955 # find every remote head that will get new children
956 updated_heads = {}
947 957 for n in remain:
948 958 p1, p2 = self.changelog.parents(n)
949 959 if p1 not in remain and p2 not in remain:
950 960 subset.append(n)
961 if heads:
962 if p1 in heads:
963 updated_heads[p1] = True
964 if p2 in heads:
965 updated_heads[p2] = True
951 966
952 967 # this is the set of all roots we have to push
953 return subset
968 if heads:
969 return subset, updated_heads.keys()
970 else:
971 return subset
954 972
955 973 def pull(self, remote, heads=None, force=False):
956 974 l = self.lock()
957 975
958 976 # if we have an empty repo, fetch everything
959 977 if self.changelog.tip() == nullid:
960 978 self.ui.status(_("requesting all changes\n"))
961 979 fetch = [nullid]
962 980 else:
963 981 fetch = self.findincoming(remote, force=force)
964 982
965 983 if not fetch:
966 984 self.ui.status(_("no changes found\n"))
967 985 return 0
968 986
969 987 if heads is None:
970 988 cg = remote.changegroup(fetch, 'pull')
971 989 else:
972 990 cg = remote.changegroupsubset(fetch, heads, 'pull')
973 991 return self.addchangegroup(cg)
974 992
975 993 def push(self, remote, force=False, revs=None):
976 994 lock = remote.lock()
977 995
978 996 base = {}
979 heads = remote.heads()
980 inc = self.findincoming(remote, base, heads, force=force)
997 remote_heads = remote.heads()
998 inc = self.findincoming(remote, base, remote_heads, force=force)
981 999 if not force and inc:
982 1000 self.ui.warn(_("abort: unsynced remote changes!\n"))
983 self.ui.status(_("(did you forget to sync? use push -f to force)\n"))
1001 self.ui.status(_("(did you forget to sync?"
1002 " use push -f to force)\n"))
984 1003 return 1
985 1004
986 update = self.findoutgoing(remote, base)
1005 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
987 1006 if revs is not None:
988 1007 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
989 1008 else:
990 1009 bases, heads = update, self.changelog.heads()
991 1010
992 1011 if not bases:
993 1012 self.ui.status(_("no changes found\n"))
994 1013 return 1
995 1014 elif not force:
996 if len(bases) < len(heads):
1015 if revs is not None:
1016 updated_heads = {}
1017 for base in msng_cl:
1018 for parent in self.changelog.parents(base):
1019 if parent in remote_heads:
1020 updated_heads[parent] = True
1021 updated_heads = updated_heads.keys()
1022 if len(updated_heads) < len(heads):
997 1023 self.ui.warn(_("abort: push creates new remote branches!\n"))
998 1024 self.ui.status(_("(did you forget to merge?"
999 1025 " use push -f to force)\n"))
1000 1026 return 1
1001 1027
1002 1028 if revs is None:
1003 1029 cg = self.changegroup(update, 'push')
1004 1030 else:
1005 1031 cg = self.changegroupsubset(update, revs, 'push')
1006 1032 return remote.addchangegroup(cg)
1007 1033
1008 1034 def changegroupsubset(self, bases, heads, source):
1009 1035 """This function generates a changegroup consisting of all the nodes
1010 1036 that are descendents of any of the bases, and ancestors of any of
1011 1037 the heads.
1012 1038
1013 1039 It is fairly complex as determining which filenodes and which
1014 1040 manifest nodes need to be included for the changeset to be complete
1015 1041 is non-trivial.
1016 1042
1017 1043 Another wrinkle is doing the reverse, figuring out which changeset in
1018 1044 the changegroup a particular filenode or manifestnode belongs to."""
1019 1045
1020 1046 self.hook('preoutgoing', throw=True, source=source)
1021 1047
1022 1048 # Set up some initial variables
1023 1049 # Make it easy to refer to self.changelog
1024 1050 cl = self.changelog
1025 1051 # msng is short for missing - compute the list of changesets in this
1026 1052 # changegroup.
1027 1053 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1028 1054 # Some bases may turn out to be superfluous, and some heads may be
1029 1055 # too. nodesbetween will return the minimal set of bases and heads
1030 1056 # necessary to re-create the changegroup.
1031 1057
1032 1058 # Known heads are the list of heads that it is assumed the recipient
1033 1059 # of this changegroup will know about.
1034 1060 knownheads = {}
1035 1061 # We assume that all parents of bases are known heads.
1036 1062 for n in bases:
1037 1063 for p in cl.parents(n):
1038 1064 if p != nullid:
1039 1065 knownheads[p] = 1
1040 1066 knownheads = knownheads.keys()
1041 1067 if knownheads:
1042 1068 # Now that we know what heads are known, we can compute which
1043 1069 # changesets are known. The recipient must know about all
1044 1070 # changesets required to reach the known heads from the null
1045 1071 # changeset.
1046 1072 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1047 1073 junk = None
1048 1074 # Transform the list into an ersatz set.
1049 1075 has_cl_set = dict.fromkeys(has_cl_set)
1050 1076 else:
1051 1077 # If there were no known heads, the recipient cannot be assumed to
1052 1078 # know about any changesets.
1053 1079 has_cl_set = {}
1054 1080
1055 1081 # Make it easy to refer to self.manifest
1056 1082 mnfst = self.manifest
1057 1083 # We don't know which manifests are missing yet
1058 1084 msng_mnfst_set = {}
1059 1085 # Nor do we know which filenodes are missing.
1060 1086 msng_filenode_set = {}
1061 1087
1062 1088 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1063 1089 junk = None
1064 1090
1065 1091 # A changeset always belongs to itself, so the changenode lookup
1066 1092 # function for a changenode is identity.
1067 1093 def identity(x):
1068 1094 return x
1069 1095
1070 1096 # A function generating function. Sets up an environment for the
1071 1097 # inner function.
1072 1098 def cmp_by_rev_func(revlog):
1073 1099 # Compare two nodes by their revision number in the environment's
1074 1100 # revision history. Since the revision number both represents the
1075 1101 # most efficient order to read the nodes in, and represents a
1076 1102 # topological sorting of the nodes, this function is often useful.
1077 1103 def cmp_by_rev(a, b):
1078 1104 return cmp(revlog.rev(a), revlog.rev(b))
1079 1105 return cmp_by_rev
1080 1106
1081 1107 # If we determine that a particular file or manifest node must be a
1082 1108 # node that the recipient of the changegroup will already have, we can
1083 1109 # also assume the recipient will have all the parents. This function
1084 1110 # prunes them from the set of missing nodes.
1085 1111 def prune_parents(revlog, hasset, msngset):
1086 1112 haslst = hasset.keys()
1087 1113 haslst.sort(cmp_by_rev_func(revlog))
1088 1114 for node in haslst:
1089 1115 parentlst = [p for p in revlog.parents(node) if p != nullid]
1090 1116 while parentlst:
1091 1117 n = parentlst.pop()
1092 1118 if n not in hasset:
1093 1119 hasset[n] = 1
1094 1120 p = [p for p in revlog.parents(n) if p != nullid]
1095 1121 parentlst.extend(p)
1096 1122 for n in hasset:
1097 1123 msngset.pop(n, None)
1098 1124
1099 1125 # This is a function generating function used to set up an environment
1100 1126 # for the inner function to execute in.
1101 1127 def manifest_and_file_collector(changedfileset):
1102 1128 # This is an information gathering function that gathers
1103 1129 # information from each changeset node that goes out as part of
1104 1130 # the changegroup. The information gathered is a list of which
1105 1131 # manifest nodes are potentially required (the recipient may
1106 1132 # already have them) and total list of all files which were
1107 1133 # changed in any changeset in the changegroup.
1108 1134 #
1109 1135 # We also remember the first changenode we saw any manifest
1110 1136 # referenced by so we can later determine which changenode 'owns'
1111 1137 # the manifest.
1112 1138 def collect_manifests_and_files(clnode):
1113 1139 c = cl.read(clnode)
1114 1140 for f in c[3]:
1115 1141 # This is to make sure we only have one instance of each
1116 1142 # filename string for each filename.
1117 1143 changedfileset.setdefault(f, f)
1118 1144 msng_mnfst_set.setdefault(c[0], clnode)
1119 1145 return collect_manifests_and_files
1120 1146
1121 1147 # Figure out which manifest nodes (of the ones we think might be part
1122 1148 # of the changegroup) the recipient must know about and remove them
1123 1149 # from the changegroup.
1124 1150 def prune_manifests():
1125 1151 has_mnfst_set = {}
1126 1152 for n in msng_mnfst_set:
1127 1153 # If a 'missing' manifest thinks it belongs to a changenode
1128 1154 # the recipient is assumed to have, obviously the recipient
1129 1155 # must have that manifest.
1130 1156 linknode = cl.node(mnfst.linkrev(n))
1131 1157 if linknode in has_cl_set:
1132 1158 has_mnfst_set[n] = 1
1133 1159 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1134 1160
1135 1161 # Use the information collected in collect_manifests_and_files to say
1136 1162 # which changenode any manifestnode belongs to.
1137 1163 def lookup_manifest_link(mnfstnode):
1138 1164 return msng_mnfst_set[mnfstnode]
1139 1165
1140 1166 # A function generating function that sets up the initial environment
1141 1167 # the inner function.
1142 1168 def filenode_collector(changedfiles):
1143 1169 next_rev = [0]
1144 1170 # This gathers information from each manifestnode included in the
1145 1171 # changegroup about which filenodes the manifest node references
1146 1172 # so we can include those in the changegroup too.
1147 1173 #
1148 1174 # It also remembers which changenode each filenode belongs to. It
1149 1175 # does this by assuming the a filenode belongs to the changenode
1150 1176 # the first manifest that references it belongs to.
1151 1177 def collect_msng_filenodes(mnfstnode):
1152 1178 r = mnfst.rev(mnfstnode)
1153 1179 if r == next_rev[0]:
1154 1180 # If the last rev we looked at was the one just previous,
1155 1181 # we only need to see a diff.
1156 1182 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1157 1183 # For each line in the delta
1158 1184 for dline in delta.splitlines():
1159 1185 # get the filename and filenode for that line
1160 1186 f, fnode = dline.split('\0')
1161 1187 fnode = bin(fnode[:40])
1162 1188 f = changedfiles.get(f, None)
1163 1189 # And if the file is in the list of files we care
1164 1190 # about.
1165 1191 if f is not None:
1166 1192 # Get the changenode this manifest belongs to
1167 1193 clnode = msng_mnfst_set[mnfstnode]
1168 1194 # Create the set of filenodes for the file if
1169 1195 # there isn't one already.
1170 1196 ndset = msng_filenode_set.setdefault(f, {})
1171 1197 # And set the filenode's changelog node to the
1172 1198 # manifest's if it hasn't been set already.
1173 1199 ndset.setdefault(fnode, clnode)
1174 1200 else:
1175 1201 # Otherwise we need a full manifest.
1176 1202 m = mnfst.read(mnfstnode)
1177 1203 # For every file in we care about.
1178 1204 for f in changedfiles:
1179 1205 fnode = m.get(f, None)
1180 1206 # If it's in the manifest
1181 1207 if fnode is not None:
1182 1208 # See comments above.
1183 1209 clnode = msng_mnfst_set[mnfstnode]
1184 1210 ndset = msng_filenode_set.setdefault(f, {})
1185 1211 ndset.setdefault(fnode, clnode)
1186 1212 # Remember the revision we hope to see next.
1187 1213 next_rev[0] = r + 1
1188 1214 return collect_msng_filenodes
1189 1215
1190 1216 # We have a list of filenodes we think we need for a file, lets remove
1191 1217 # all those we now the recipient must have.
1192 1218 def prune_filenodes(f, filerevlog):
1193 1219 msngset = msng_filenode_set[f]
1194 1220 hasset = {}
1195 1221 # If a 'missing' filenode thinks it belongs to a changenode we
1196 1222 # assume the recipient must have, then the recipient must have
1197 1223 # that filenode.
1198 1224 for n in msngset:
1199 1225 clnode = cl.node(filerevlog.linkrev(n))
1200 1226 if clnode in has_cl_set:
1201 1227 hasset[n] = 1
1202 1228 prune_parents(filerevlog, hasset, msngset)
1203 1229
1204 1230 # A function generator function that sets up the a context for the
1205 1231 # inner function.
1206 1232 def lookup_filenode_link_func(fname):
1207 1233 msngset = msng_filenode_set[fname]
1208 1234 # Lookup the changenode the filenode belongs to.
1209 1235 def lookup_filenode_link(fnode):
1210 1236 return msngset[fnode]
1211 1237 return lookup_filenode_link
1212 1238
1213 1239 # Now that we have all theses utility functions to help out and
1214 1240 # logically divide up the task, generate the group.
1215 1241 def gengroup():
1216 1242 # The set of changed files starts empty.
1217 1243 changedfiles = {}
1218 1244 # Create a changenode group generator that will call our functions
1219 1245 # back to lookup the owning changenode and collect information.
1220 1246 group = cl.group(msng_cl_lst, identity,
1221 1247 manifest_and_file_collector(changedfiles))
1222 1248 for chnk in group:
1223 1249 yield chnk
1224 1250
1225 1251 # The list of manifests has been collected by the generator
1226 1252 # calling our functions back.
1227 1253 prune_manifests()
1228 1254 msng_mnfst_lst = msng_mnfst_set.keys()
1229 1255 # Sort the manifestnodes by revision number.
1230 1256 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1231 1257 # Create a generator for the manifestnodes that calls our lookup
1232 1258 # and data collection functions back.
1233 1259 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1234 1260 filenode_collector(changedfiles))
1235 1261 for chnk in group:
1236 1262 yield chnk
1237 1263
1238 1264 # These are no longer needed, dereference and toss the memory for
1239 1265 # them.
1240 1266 msng_mnfst_lst = None
1241 1267 msng_mnfst_set.clear()
1242 1268
1243 1269 changedfiles = changedfiles.keys()
1244 1270 changedfiles.sort()
1245 1271 # Go through all our files in order sorted by name.
1246 1272 for fname in changedfiles:
1247 1273 filerevlog = self.file(fname)
1248 1274 # Toss out the filenodes that the recipient isn't really
1249 1275 # missing.
1250 1276 if msng_filenode_set.has_key(fname):
1251 1277 prune_filenodes(fname, filerevlog)
1252 1278 msng_filenode_lst = msng_filenode_set[fname].keys()
1253 1279 else:
1254 1280 msng_filenode_lst = []
1255 1281 # If any filenodes are left, generate the group for them,
1256 1282 # otherwise don't bother.
1257 1283 if len(msng_filenode_lst) > 0:
1258 1284 yield changegroup.genchunk(fname)
1259 1285 # Sort the filenodes by their revision #
1260 1286 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1261 1287 # Create a group generator and only pass in a changenode
1262 1288 # lookup function as we need to collect no information
1263 1289 # from filenodes.
1264 1290 group = filerevlog.group(msng_filenode_lst,
1265 1291 lookup_filenode_link_func(fname))
1266 1292 for chnk in group:
1267 1293 yield chnk
1268 1294 if msng_filenode_set.has_key(fname):
1269 1295 # Don't need this anymore, toss it to free memory.
1270 1296 del msng_filenode_set[fname]
1271 1297 # Signal that no more groups are left.
1272 1298 yield changegroup.closechunk()
1273 1299
1274 1300 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1275 1301
1276 1302 return util.chunkbuffer(gengroup())
1277 1303
1278 1304 def changegroup(self, basenodes, source):
1279 1305 """Generate a changegroup of all nodes that we have that a recipient
1280 1306 doesn't.
1281 1307
1282 1308 This is much easier than the previous function as we can assume that
1283 1309 the recipient has any changenode we aren't sending them."""
1284 1310
1285 1311 self.hook('preoutgoing', throw=True, source=source)
1286 1312
1287 1313 cl = self.changelog
1288 1314 nodes = cl.nodesbetween(basenodes, None)[0]
1289 1315 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1290 1316
1291 1317 def identity(x):
1292 1318 return x
1293 1319
1294 1320 def gennodelst(revlog):
1295 1321 for r in xrange(0, revlog.count()):
1296 1322 n = revlog.node(r)
1297 1323 if revlog.linkrev(n) in revset:
1298 1324 yield n
1299 1325
1300 1326 def changed_file_collector(changedfileset):
1301 1327 def collect_changed_files(clnode):
1302 1328 c = cl.read(clnode)
1303 1329 for fname in c[3]:
1304 1330 changedfileset[fname] = 1
1305 1331 return collect_changed_files
1306 1332
1307 1333 def lookuprevlink_func(revlog):
1308 1334 def lookuprevlink(n):
1309 1335 return cl.node(revlog.linkrev(n))
1310 1336 return lookuprevlink
1311 1337
1312 1338 def gengroup():
1313 1339 # construct a list of all changed files
1314 1340 changedfiles = {}
1315 1341
1316 1342 for chnk in cl.group(nodes, identity,
1317 1343 changed_file_collector(changedfiles)):
1318 1344 yield chnk
1319 1345 changedfiles = changedfiles.keys()
1320 1346 changedfiles.sort()
1321 1347
1322 1348 mnfst = self.manifest
1323 1349 nodeiter = gennodelst(mnfst)
1324 1350 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1325 1351 yield chnk
1326 1352
1327 1353 for fname in changedfiles:
1328 1354 filerevlog = self.file(fname)
1329 1355 nodeiter = gennodelst(filerevlog)
1330 1356 nodeiter = list(nodeiter)
1331 1357 if nodeiter:
1332 1358 yield changegroup.genchunk(fname)
1333 1359 lookup = lookuprevlink_func(filerevlog)
1334 1360 for chnk in filerevlog.group(nodeiter, lookup):
1335 1361 yield chnk
1336 1362
1337 1363 yield changegroup.closechunk()
1338 1364 self.hook('outgoing', node=hex(nodes[0]), source=source)
1339 1365
1340 1366 return util.chunkbuffer(gengroup())
1341 1367
1342 1368 def addchangegroup(self, source):
1343 1369 """add changegroup to repo.
1344 1370 returns number of heads modified or added + 1."""
1345 1371
1346 1372 def csmap(x):
1347 1373 self.ui.debug(_("add changeset %s\n") % short(x))
1348 1374 return cl.count()
1349 1375
1350 1376 def revmap(x):
1351 1377 return cl.rev(x)
1352 1378
1353 1379 if not source:
1354 1380 return 0
1355 1381
1356 1382 self.hook('prechangegroup', throw=True)
1357 1383
1358 1384 changesets = files = revisions = 0
1359 1385
1360 1386 tr = self.transaction()
1361 1387
1362 1388 # write changelog and manifest data to temp files so
1363 1389 # concurrent readers will not see inconsistent view
1364 1390 cl = appendfile.appendchangelog(self.opener)
1365 1391
1366 1392 oldheads = len(cl.heads())
1367 1393
1368 1394 # pull off the changeset group
1369 1395 self.ui.status(_("adding changesets\n"))
1370 1396 co = cl.tip()
1371 1397 chunkiter = changegroup.chunkiter(source)
1372 1398 cn = cl.addgroup(chunkiter, csmap, tr, 1) # unique
1373 1399 cnr, cor = map(cl.rev, (cn, co))
1374 1400 if cn == nullid:
1375 1401 cnr = cor
1376 1402 changesets = cnr - cor
1377 1403
1378 1404 mf = appendfile.appendmanifest(self.opener)
1379 1405
1380 1406 # pull off the manifest group
1381 1407 self.ui.status(_("adding manifests\n"))
1382 1408 mm = mf.tip()
1383 1409 chunkiter = changegroup.chunkiter(source)
1384 1410 mo = mf.addgroup(chunkiter, revmap, tr)
1385 1411
1386 1412 # process the files
1387 1413 self.ui.status(_("adding file changes\n"))
1388 1414 while 1:
1389 1415 f = changegroup.getchunk(source)
1390 1416 if not f:
1391 1417 break
1392 1418 self.ui.debug(_("adding %s revisions\n") % f)
1393 1419 fl = self.file(f)
1394 1420 o = fl.count()
1395 1421 chunkiter = changegroup.chunkiter(source)
1396 1422 n = fl.addgroup(chunkiter, revmap, tr)
1397 1423 revisions += fl.count() - o
1398 1424 files += 1
1399 1425
1400 1426 # write order here is important so concurrent readers will see
1401 1427 # consistent view of repo
1402 1428 mf.writedata()
1403 1429 cl.writedata()
1404 1430
1405 1431 # make changelog and manifest see real files again
1406 1432 self.changelog = changelog.changelog(self.opener)
1407 1433 self.manifest = manifest.manifest(self.opener)
1408 1434
1409 1435 newheads = len(self.changelog.heads())
1410 1436 heads = ""
1411 1437 if oldheads and newheads > oldheads:
1412 1438 heads = _(" (+%d heads)") % (newheads - oldheads)
1413 1439
1414 1440 self.ui.status(_("added %d changesets"
1415 1441 " with %d changes to %d files%s\n")
1416 1442 % (changesets, revisions, files, heads))
1417 1443
1418 1444 self.hook('pretxnchangegroup', throw=True,
1419 1445 node=hex(self.changelog.node(cor+1)))
1420 1446
1421 1447 tr.close()
1422 1448
1423 1449 if changesets > 0:
1424 1450 self.hook("changegroup", node=hex(self.changelog.node(cor+1)))
1425 1451
1426 1452 for i in range(cor + 1, cnr + 1):
1427 1453 self.hook("incoming", node=hex(self.changelog.node(i)))
1428 1454
1429 1455 return newheads - oldheads + 1
1430 1456
1431 1457 def update(self, node, allow=False, force=False, choose=None,
1432 1458 moddirstate=True, forcemerge=False, wlock=None):
1433 1459 pl = self.dirstate.parents()
1434 1460 if not force and pl[1] != nullid:
1435 1461 self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
1436 1462 return 1
1437 1463
1438 1464 err = False
1439 1465
1440 1466 p1, p2 = pl[0], node
1441 1467 pa = self.changelog.ancestor(p1, p2)
1442 1468 m1n = self.changelog.read(p1)[0]
1443 1469 m2n = self.changelog.read(p2)[0]
1444 1470 man = self.manifest.ancestor(m1n, m2n)
1445 1471 m1 = self.manifest.read(m1n)
1446 1472 mf1 = self.manifest.readflags(m1n)
1447 1473 m2 = self.manifest.read(m2n).copy()
1448 1474 mf2 = self.manifest.readflags(m2n)
1449 1475 ma = self.manifest.read(man)
1450 1476 mfa = self.manifest.readflags(man)
1451 1477
1452 1478 modified, added, removed, deleted, unknown = self.changes()
1453 1479
1454 1480 # is this a jump, or a merge? i.e. is there a linear path
1455 1481 # from p1 to p2?
1456 1482 linear_path = (pa == p1 or pa == p2)
1457 1483
1458 1484 if allow and linear_path:
1459 1485 raise util.Abort(_("there is nothing to merge, "
1460 1486 "just use 'hg update'"))
1461 1487 if allow and not forcemerge:
1462 1488 if modified or added or removed:
1463 1489 raise util.Abort(_("outstanding uncommitted changes"))
1464 1490 if not forcemerge and not force:
1465 1491 for f in unknown:
1466 1492 if f in m2:
1467 1493 t1 = self.wread(f)
1468 1494 t2 = self.file(f).read(m2[f])
1469 1495 if cmp(t1, t2) != 0:
1470 1496 raise util.Abort(_("'%s' already exists in the working"
1471 1497 " dir and differs from remote") % f)
1472 1498
1473 1499 # resolve the manifest to determine which files
1474 1500 # we care about merging
1475 1501 self.ui.note(_("resolving manifests\n"))
1476 1502 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1477 1503 (force, allow, moddirstate, linear_path))
1478 1504 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1479 1505 (short(man), short(m1n), short(m2n)))
1480 1506
1481 1507 merge = {}
1482 1508 get = {}
1483 1509 remove = []
1484 1510
1485 1511 # construct a working dir manifest
1486 1512 mw = m1.copy()
1487 1513 mfw = mf1.copy()
1488 1514 umap = dict.fromkeys(unknown)
1489 1515
1490 1516 for f in added + modified + unknown:
1491 1517 mw[f] = ""
1492 1518 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1493 1519
1494 1520 if moddirstate and not wlock:
1495 1521 wlock = self.wlock()
1496 1522
1497 1523 for f in deleted + removed:
1498 1524 if f in mw:
1499 1525 del mw[f]
1500 1526
1501 1527 # If we're jumping between revisions (as opposed to merging),
1502 1528 # and if neither the working directory nor the target rev has
1503 1529 # the file, then we need to remove it from the dirstate, to
1504 1530 # prevent the dirstate from listing the file when it is no
1505 1531 # longer in the manifest.
1506 1532 if moddirstate and linear_path and f not in m2:
1507 1533 self.dirstate.forget((f,))
1508 1534
1509 1535 # Compare manifests
1510 1536 for f, n in mw.iteritems():
1511 1537 if choose and not choose(f):
1512 1538 continue
1513 1539 if f in m2:
1514 1540 s = 0
1515 1541
1516 1542 # is the wfile new since m1, and match m2?
1517 1543 if f not in m1:
1518 1544 t1 = self.wread(f)
1519 1545 t2 = self.file(f).read(m2[f])
1520 1546 if cmp(t1, t2) == 0:
1521 1547 n = m2[f]
1522 1548 del t1, t2
1523 1549
1524 1550 # are files different?
1525 1551 if n != m2[f]:
1526 1552 a = ma.get(f, nullid)
1527 1553 # are both different from the ancestor?
1528 1554 if n != a and m2[f] != a:
1529 1555 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1530 1556 # merge executable bits
1531 1557 # "if we changed or they changed, change in merge"
1532 1558 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1533 1559 mode = ((a^b) | (a^c)) ^ a
1534 1560 merge[f] = (m1.get(f, nullid), m2[f], mode)
1535 1561 s = 1
1536 1562 # are we clobbering?
1537 1563 # is remote's version newer?
1538 1564 # or are we going back in time?
1539 1565 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1540 1566 self.ui.debug(_(" remote %s is newer, get\n") % f)
1541 1567 get[f] = m2[f]
1542 1568 s = 1
1543 1569 elif f in umap:
1544 1570 # this unknown file is the same as the checkout
1545 1571 get[f] = m2[f]
1546 1572
1547 1573 if not s and mfw[f] != mf2[f]:
1548 1574 if force:
1549 1575 self.ui.debug(_(" updating permissions for %s\n") % f)
1550 1576 util.set_exec(self.wjoin(f), mf2[f])
1551 1577 else:
1552 1578 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1553 1579 mode = ((a^b) | (a^c)) ^ a
1554 1580 if mode != b:
1555 1581 self.ui.debug(_(" updating permissions for %s\n")
1556 1582 % f)
1557 1583 util.set_exec(self.wjoin(f), mode)
1558 1584 del m2[f]
1559 1585 elif f in ma:
1560 1586 if n != ma[f]:
1561 1587 r = _("d")
1562 1588 if not force and (linear_path or allow):
1563 1589 r = self.ui.prompt(
1564 1590 (_(" local changed %s which remote deleted\n") % f) +
1565 1591 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1566 1592 if r == _("d"):
1567 1593 remove.append(f)
1568 1594 else:
1569 1595 self.ui.debug(_("other deleted %s\n") % f)
1570 1596 remove.append(f) # other deleted it
1571 1597 else:
1572 1598 # file is created on branch or in working directory
1573 1599 if force and f not in umap:
1574 1600 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1575 1601 remove.append(f)
1576 1602 elif n == m1.get(f, nullid): # same as parent
1577 1603 if p2 == pa: # going backwards?
1578 1604 self.ui.debug(_("remote deleted %s\n") % f)
1579 1605 remove.append(f)
1580 1606 else:
1581 1607 self.ui.debug(_("local modified %s, keeping\n") % f)
1582 1608 else:
1583 1609 self.ui.debug(_("working dir created %s, keeping\n") % f)
1584 1610
1585 1611 for f, n in m2.iteritems():
1586 1612 if choose and not choose(f):
1587 1613 continue
1588 1614 if f[0] == "/":
1589 1615 continue
1590 1616 if f in ma and n != ma[f]:
1591 1617 r = _("k")
1592 1618 if not force and (linear_path or allow):
1593 1619 r = self.ui.prompt(
1594 1620 (_("remote changed %s which local deleted\n") % f) +
1595 1621 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1596 1622 if r == _("k"):
1597 1623 get[f] = n
1598 1624 elif f not in ma:
1599 1625 self.ui.debug(_("remote created %s\n") % f)
1600 1626 get[f] = n
1601 1627 else:
1602 1628 if force or p2 == pa: # going backwards?
1603 1629 self.ui.debug(_("local deleted %s, recreating\n") % f)
1604 1630 get[f] = n
1605 1631 else:
1606 1632 self.ui.debug(_("local deleted %s\n") % f)
1607 1633
1608 1634 del mw, m1, m2, ma
1609 1635
1610 1636 if force:
1611 1637 for f in merge:
1612 1638 get[f] = merge[f][1]
1613 1639 merge = {}
1614 1640
1615 1641 if linear_path or force:
1616 1642 # we don't need to do any magic, just jump to the new rev
1617 1643 branch_merge = False
1618 1644 p1, p2 = p2, nullid
1619 1645 else:
1620 1646 if not allow:
1621 1647 self.ui.status(_("this update spans a branch"
1622 1648 " affecting the following files:\n"))
1623 1649 fl = merge.keys() + get.keys()
1624 1650 fl.sort()
1625 1651 for f in fl:
1626 1652 cf = ""
1627 1653 if f in merge:
1628 1654 cf = _(" (resolve)")
1629 1655 self.ui.status(" %s%s\n" % (f, cf))
1630 1656 self.ui.warn(_("aborting update spanning branches!\n"))
1631 1657 self.ui.status(_("(use 'hg merge' to merge across branches"
1632 1658 " or '-C' to lose changes)\n"))
1633 1659 return 1
1634 1660 branch_merge = True
1635 1661
1636 1662 # get the files we don't need to change
1637 1663 files = get.keys()
1638 1664 files.sort()
1639 1665 for f in files:
1640 1666 if f[0] == "/":
1641 1667 continue
1642 1668 self.ui.note(_("getting %s\n") % f)
1643 1669 t = self.file(f).read(get[f])
1644 1670 self.wwrite(f, t)
1645 1671 util.set_exec(self.wjoin(f), mf2[f])
1646 1672 if moddirstate:
1647 1673 if branch_merge:
1648 1674 self.dirstate.update([f], 'n', st_mtime=-1)
1649 1675 else:
1650 1676 self.dirstate.update([f], 'n')
1651 1677
1652 1678 # merge the tricky bits
1653 1679 failedmerge = []
1654 1680 files = merge.keys()
1655 1681 files.sort()
1656 1682 xp1 = hex(p1)
1657 1683 xp2 = hex(p2)
1658 1684 for f in files:
1659 1685 self.ui.status(_("merging %s\n") % f)
1660 1686 my, other, flag = merge[f]
1661 1687 ret = self.merge3(f, my, other, xp1, xp2)
1662 1688 if ret:
1663 1689 err = True
1664 1690 failedmerge.append(f)
1665 1691 util.set_exec(self.wjoin(f), flag)
1666 1692 if moddirstate:
1667 1693 if branch_merge:
1668 1694 # We've done a branch merge, mark this file as merged
1669 1695 # so that we properly record the merger later
1670 1696 self.dirstate.update([f], 'm')
1671 1697 else:
1672 1698 # We've update-merged a locally modified file, so
1673 1699 # we set the dirstate to emulate a normal checkout
1674 1700 # of that file some time in the past. Thus our
1675 1701 # merge will appear as a normal local file
1676 1702 # modification.
1677 1703 f_len = len(self.file(f).read(other))
1678 1704 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1679 1705
1680 1706 remove.sort()
1681 1707 for f in remove:
1682 1708 self.ui.note(_("removing %s\n") % f)
1683 1709 util.audit_path(f)
1684 1710 try:
1685 1711 util.unlink(self.wjoin(f))
1686 1712 except OSError, inst:
1687 1713 if inst.errno != errno.ENOENT:
1688 1714 self.ui.warn(_("update failed to remove %s: %s!\n") %
1689 1715 (f, inst.strerror))
1690 1716 if moddirstate:
1691 1717 if branch_merge:
1692 1718 self.dirstate.update(remove, 'r')
1693 1719 else:
1694 1720 self.dirstate.forget(remove)
1695 1721
1696 1722 if moddirstate:
1697 1723 self.dirstate.setparents(p1, p2)
1698 1724
1699 1725 stat = ((len(get), _("updated")),
1700 1726 (len(merge) - len(failedmerge), _("merged")),
1701 1727 (len(remove), _("removed")),
1702 1728 (len(failedmerge), _("unresolved")))
1703 1729 note = ", ".join([_("%d files %s") % s for s in stat])
1704 1730 self.ui.note("%s\n" % note)
1705 1731 if moddirstate and branch_merge:
1706 1732 self.ui.note(_("(branch merge, don't forget to commit)\n"))
1707 1733
1708 1734 return err
1709 1735
1710 1736 def merge3(self, fn, my, other, p1, p2):
1711 1737 """perform a 3-way merge in the working directory"""
1712 1738
1713 1739 def temp(prefix, node):
1714 1740 pre = "%s~%s." % (os.path.basename(fn), prefix)
1715 1741 (fd, name) = tempfile.mkstemp("", pre)
1716 1742 f = os.fdopen(fd, "wb")
1717 1743 self.wwrite(fn, fl.read(node), f)
1718 1744 f.close()
1719 1745 return name
1720 1746
1721 1747 fl = self.file(fn)
1722 1748 base = fl.ancestor(my, other)
1723 1749 a = self.wjoin(fn)
1724 1750 b = temp("base", base)
1725 1751 c = temp("other", other)
1726 1752
1727 1753 self.ui.note(_("resolving %s\n") % fn)
1728 1754 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1729 1755 (fn, short(my), short(other), short(base)))
1730 1756
1731 1757 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1732 1758 or "hgmerge")
1733 1759 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1734 1760 environ={'HG_FILE': fn,
1735 1761 'HG_MY_NODE': p1,
1736 1762 'HG_OTHER_NODE': p2,
1737 1763 'HG_FILE_MY_NODE': hex(my),
1738 1764 'HG_FILE_OTHER_NODE': hex(other),
1739 1765 'HG_FILE_BASE_NODE': hex(base)})
1740 1766 if r:
1741 1767 self.ui.warn(_("merging %s failed!\n") % fn)
1742 1768
1743 1769 os.unlink(b)
1744 1770 os.unlink(c)
1745 1771 return r
1746 1772
1747 1773 def verify(self):
1748 1774 filelinkrevs = {}
1749 1775 filenodes = {}
1750 1776 changesets = revisions = files = 0
1751 1777 errors = [0]
1752 1778 neededmanifests = {}
1753 1779
1754 1780 def err(msg):
1755 1781 self.ui.warn(msg + "\n")
1756 1782 errors[0] += 1
1757 1783
1758 1784 def checksize(obj, name):
1759 1785 d = obj.checksize()
1760 1786 if d[0]:
1761 1787 err(_("%s data length off by %d bytes") % (name, d[0]))
1762 1788 if d[1]:
1763 1789 err(_("%s index contains %d extra bytes") % (name, d[1]))
1764 1790
1765 1791 seen = {}
1766 1792 self.ui.status(_("checking changesets\n"))
1767 1793 checksize(self.changelog, "changelog")
1768 1794
1769 1795 for i in range(self.changelog.count()):
1770 1796 changesets += 1
1771 1797 n = self.changelog.node(i)
1772 1798 l = self.changelog.linkrev(n)
1773 1799 if l != i:
1774 1800 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1775 1801 if n in seen:
1776 1802 err(_("duplicate changeset at revision %d") % i)
1777 1803 seen[n] = 1
1778 1804
1779 1805 for p in self.changelog.parents(n):
1780 1806 if p not in self.changelog.nodemap:
1781 1807 err(_("changeset %s has unknown parent %s") %
1782 1808 (short(n), short(p)))
1783 1809 try:
1784 1810 changes = self.changelog.read(n)
1785 1811 except KeyboardInterrupt:
1786 1812 self.ui.warn(_("interrupted"))
1787 1813 raise
1788 1814 except Exception, inst:
1789 1815 err(_("unpacking changeset %s: %s") % (short(n), inst))
1790 1816 continue
1791 1817
1792 1818 neededmanifests[changes[0]] = n
1793 1819
1794 1820 for f in changes[3]:
1795 1821 filelinkrevs.setdefault(f, []).append(i)
1796 1822
1797 1823 seen = {}
1798 1824 self.ui.status(_("checking manifests\n"))
1799 1825 checksize(self.manifest, "manifest")
1800 1826
1801 1827 for i in range(self.manifest.count()):
1802 1828 n = self.manifest.node(i)
1803 1829 l = self.manifest.linkrev(n)
1804 1830
1805 1831 if l < 0 or l >= self.changelog.count():
1806 1832 err(_("bad manifest link (%d) at revision %d") % (l, i))
1807 1833
1808 1834 if n in neededmanifests:
1809 1835 del neededmanifests[n]
1810 1836
1811 1837 if n in seen:
1812 1838 err(_("duplicate manifest at revision %d") % i)
1813 1839
1814 1840 seen[n] = 1
1815 1841
1816 1842 for p in self.manifest.parents(n):
1817 1843 if p not in self.manifest.nodemap:
1818 1844 err(_("manifest %s has unknown parent %s") %
1819 1845 (short(n), short(p)))
1820 1846
1821 1847 try:
1822 1848 delta = mdiff.patchtext(self.manifest.delta(n))
1823 1849 except KeyboardInterrupt:
1824 1850 self.ui.warn(_("interrupted"))
1825 1851 raise
1826 1852 except Exception, inst:
1827 1853 err(_("unpacking manifest %s: %s") % (short(n), inst))
1828 1854 continue
1829 1855
1830 1856 try:
1831 1857 ff = [ l.split('\0') for l in delta.splitlines() ]
1832 1858 for f, fn in ff:
1833 1859 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1834 1860 except (ValueError, TypeError), inst:
1835 1861 err(_("broken delta in manifest %s: %s") % (short(n), inst))
1836 1862
1837 1863 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1838 1864
1839 1865 for m, c in neededmanifests.items():
1840 1866 err(_("Changeset %s refers to unknown manifest %s") %
1841 1867 (short(m), short(c)))
1842 1868 del neededmanifests
1843 1869
1844 1870 for f in filenodes:
1845 1871 if f not in filelinkrevs:
1846 1872 err(_("file %s in manifest but not in changesets") % f)
1847 1873
1848 1874 for f in filelinkrevs:
1849 1875 if f not in filenodes:
1850 1876 err(_("file %s in changeset but not in manifest") % f)
1851 1877
1852 1878 self.ui.status(_("checking files\n"))
1853 1879 ff = filenodes.keys()
1854 1880 ff.sort()
1855 1881 for f in ff:
1856 1882 if f == "/dev/null":
1857 1883 continue
1858 1884 files += 1
1859 1885 if not f:
1860 1886 err(_("file without name in manifest %s") % short(n))
1861 1887 continue
1862 1888 fl = self.file(f)
1863 1889 checksize(fl, f)
1864 1890
1865 1891 nodes = {nullid: 1}
1866 1892 seen = {}
1867 1893 for i in range(fl.count()):
1868 1894 revisions += 1
1869 1895 n = fl.node(i)
1870 1896
1871 1897 if n in seen:
1872 1898 err(_("%s: duplicate revision %d") % (f, i))
1873 1899 if n not in filenodes[f]:
1874 1900 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1875 1901 else:
1876 1902 del filenodes[f][n]
1877 1903
1878 1904 flr = fl.linkrev(n)
1879 1905 if flr not in filelinkrevs.get(f, []):
1880 1906 err(_("%s:%s points to unexpected changeset %d")
1881 1907 % (f, short(n), flr))
1882 1908 else:
1883 1909 filelinkrevs[f].remove(flr)
1884 1910
1885 1911 # verify contents
1886 1912 try:
1887 1913 t = fl.read(n)
1888 1914 except KeyboardInterrupt:
1889 1915 self.ui.warn(_("interrupted"))
1890 1916 raise
1891 1917 except Exception, inst:
1892 1918 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1893 1919
1894 1920 # verify parents
1895 1921 (p1, p2) = fl.parents(n)
1896 1922 if p1 not in nodes:
1897 1923 err(_("file %s:%s unknown parent 1 %s") %
1898 1924 (f, short(n), short(p1)))
1899 1925 if p2 not in nodes:
1900 1926 err(_("file %s:%s unknown parent 2 %s") %
1901 1927 (f, short(n), short(p1)))
1902 1928 nodes[n] = 1
1903 1929
1904 1930 # cross-check
1905 1931 for node in filenodes[f]:
1906 1932 err(_("node %s in manifests not in %s") % (hex(node), f))
1907 1933
1908 1934 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1909 1935 (files, changesets, revisions))
1910 1936
1911 1937 if errors[0]:
1912 1938 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1913 1939 return 1
1914 1940
1915 1941 # used to avoid circular references so destructors work
1916 1942 def aftertrans(base):
1917 1943 p = base
1918 1944 def a():
1919 1945 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1920 1946 util.rename(os.path.join(p, "journal.dirstate"),
1921 1947 os.path.join(p, "undo.dirstate"))
1922 1948 return a
1923 1949
@@ -1,28 +1,55 b''
#!/bin/sh
# Regression test: 'hg push' and 'hg push -r' must refuse to create new
# remote heads unless --force is given.

# repo a: single root changeset
mkdir a
cd a
hg init
echo foo > t1
hg add t1
hg commit -m "1" -d "1000000 0"

cd ..
hg clone a b

# diverge: an extra commit in a ...
cd a
echo foo > t2
hg add t2
hg commit -m "2" -d "1000000 0"

# ... and an unrelated commit in the clone b
cd ../b
echo foo > t3
hg add t3
hg commit -m "3" -d "1000000 0"

hg push ../a   # expected to fail: unsynced remote changes
hg pull ../a
hg push ../a   # expected to fail: would still create a new remote head
hg up -m
hg commit -m "4" -d "1000000 0"
hg push ../a   # succeeds once the heads are merged
cd ..

# repo c: linear history of three changesets 0-1-2
hg init c
cd c
for i in 0 1 2; do
    echo $i >> foo
    hg ci -Am $i -d "1000000 0"
done
cd ..

# repo d: clone of c plus two new heads branching off revs 0 and 1
hg clone c d
cd d
for i in 0 1; do
    hg co -C $i
    echo d-$i >> foo
    hg ci -m d-$i -d "1000000 0"
done

# merge one pair of heads so one extra head still remains
HGMERGE=true hg co -m 3
hg ci -m c-d -d "1000000 0"

hg push ../c            # expected to fail: adds new remote heads
hg push -r 2 ../c       # nothing new below rev 2: no changes found
hg push -r 3 -r 4 ../c  # expected to fail: -r must not bypass the head check
hg push -r 5 ../c       # expected to fail: merge still leaves an extra head

exit 0
@@ -1,21 +1,38 b''
1 1 pushing to ../a
2 2 searching for changes
3 3 abort: unsynced remote changes!
4 4 (did you forget to sync? use push -f to force)
5 5 pulling from ../a
6 6 searching for changes
7 7 adding changesets
8 8 adding manifests
9 9 adding file changes
10 10 added 1 changesets with 1 changes to 1 files (+1 heads)
11 11 (run 'hg heads' to see heads, 'hg merge' to merge)
12 12 pushing to ../a
13 13 searching for changes
14 14 abort: push creates new remote branches!
15 15 (did you forget to merge? use push -f to force)
16 16 pushing to ../a
17 17 searching for changes
18 18 adding changesets
19 19 adding manifests
20 20 adding file changes
21 21 added 2 changesets with 1 changes to 1 files
22 adding foo
23 merging foo
24 pushing to ../c
25 searching for changes
26 abort: push creates new remote branches!
27 (did you forget to merge? use push -f to force)
28 pushing to ../c
29 searching for changes
30 no changes found
31 pushing to ../c
32 searching for changes
33 abort: push creates new remote branches!
34 (did you forget to merge? use push -f to force)
35 pushing to ../c
36 searching for changes
37 abort: push creates new remote branches!
38 (did you forget to merge? use push -f to force)
General Comments 0
You need to be logged in to leave comments. Login now