##// END OF EJS Templates
Restored old behaviour for 'hg co' and 'hg co -C' for removed files.
Author: Thomas Arendsen Hein
Changeset: r1621:ee16f061 (default branch)
@@ -1,1815 +1,1815 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import struct, os, util
9 9 import filelog, manifest, changelog, dirstate, repo
10 10 from node import *
11 11 from i18n import gettext as _
12 12 from demandload import *
13 13 demandload(globals(), "re lock transaction tempfile stat mdiff errno")
14 14
15 15 class localrepository(object):
    def __init__(self, ui, path=None, create=0):
        """Open (or create) the repository at 'path'.

        If no path is given, walk upward from the current directory
        until a '.hg' directory is found.  Raises repo.RepoError when
        no repository exists and 'create' is false.
        """
        if not path:
            # search upward for a directory containing '.hg'
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    # reached the filesystem root without finding a repo
                    raise repo.RepoError(_("no repo found"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise repo.RepoError(_("repository %s not found") % path)

        self.root = os.path.abspath(path)
        self.ui = ui
        # opener reads/writes under .hg; wopener under the working dir
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.manifest = manifest.manifest(self.opener)
        self.changelog = changelog.changelog(self.opener)
        # lazily-populated caches (see tags() and nodetags())
        self.tagscache = None
        self.nodetagscache = None
        # lazily-compiled encode/decode filter patterns (see wread/wwrite)
        self.encodepats = None
        self.decodepats = None

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.dirstate = dirstate.dirstate(self.opener, ui, self.root)
        try:
            # per-repository configuration is optional
            self.ui.readconfig(self.join("hgrc"))
        except IOError:
            pass
50 50
    def hook(self, name, **args):
        """Run all configured hooks matching 'name'.

        Keyword arguments are exported to each hook as upper-cased
        environment variables.  Returns True when every matching hook
        exited with status 0, False otherwise.
        """
        def runhook(name, cmd):
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            old = {}
            # export the hook arguments, remembering prior values
            for k, v in args.items():
                k = k.upper()
                old[k] = os.environ.get(k, None)
                os.environ[k] = v

            # Hooks run in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            r = os.system(cmd)
            os.chdir(olddir)

            # restore the environment to its previous state
            for k, v in old.items():
                if v != None:
                    os.environ[k] = v
                else:
                    del os.environ[k]

            if r:
                self.ui.warn(_("abort: %s hook failed with status %d!\n") %
                             (name, r))
                return False
            return True

        r = True
        # hooks are configured as '<name>' or '<name>.<suffix>' keys
        for hname, cmd in self.ui.configitems("hooks"):
            s = hname.split(".")
            if s[0] == name and cmd:
                r = runhook(hname, cmd) and r
        return r
84 84
    def tags(self):
        '''return a mapping of tag to node'''
        if not self.tagscache:
            self.tagscache = {}
            def addtag(self, k, n):
                # store the binary node for tag k; unparsable nodes are
                # recorded as '' so the tag is effectively ignored
                try:
                    bin_n = bin(n)
                except TypeError:
                    bin_n = ''
                self.tagscache[k.strip()] = bin_n

            try:
                # read each head of the tags file, ending with the tip
                # and add each tag found to the map, with "newer" ones
                # taking precedence
                fl = self.file(".hgtags")
                h = fl.heads()
                h.reverse()
                for r in h:
                    for l in fl.read(r).splitlines():
                        if l:
                            n, k = l.split(" ", 1)
                            addtag(self, k, n)
            except KeyError:
                # no .hgtags file in this repository
                pass

            try:
                # local (uncommitted) tags override committed ones
                f = self.opener("localtags")
                for l in f:
                    n, k = l.split(" ", 1)
                    addtag(self, k, n)
            except IOError:
                pass

            # 'tip' is always implicitly defined
            self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
122 122
123 123 def tagslist(self):
124 124 '''return a list of tags ordered by revision'''
125 125 l = []
126 126 for t, n in self.tags().items():
127 127 try:
128 128 r = self.changelog.rev(n)
129 129 except:
130 130 r = -2 # sort to the beginning of the list if unknown
131 131 l.append((r, t, n))
132 132 l.sort()
133 133 return [(t, n) for r, t, n in l]
134 134
135 135 def nodetags(self, node):
136 136 '''return the tags associated with a node'''
137 137 if not self.nodetagscache:
138 138 self.nodetagscache = {}
139 139 for t, n in self.tags().items():
140 140 self.nodetagscache.setdefault(n, []).append(t)
141 141 return self.nodetagscache.get(node, [])
142 142
143 143 def lookup(self, key):
144 144 try:
145 145 return self.tags()[key]
146 146 except KeyError:
147 147 try:
148 148 return self.changelog.lookup(key)
149 149 except:
150 150 raise repo.RepoError(_("unknown revision '%s'") % key)
151 151
152 152 def dev(self):
153 153 return os.stat(self.path).st_dev
154 154
    def local(self):
        # this repository object accesses its store directly on disk,
        # i.e. it is "local" in the repository-API sense
        return True
157 157
158 158 def join(self, f):
159 159 return os.path.join(self.path, f)
160 160
161 161 def wjoin(self, f):
162 162 return os.path.join(self.root, f)
163 163
164 164 def file(self, f):
165 165 if f[0] == '/':
166 166 f = f[1:]
167 167 return filelog.filelog(self.opener, f)
168 168
    def getcwd(self):
        # current directory relative to the repo root, per the dirstate
        return self.dirstate.getcwd()
171 171
    def wfile(self, f, mode='r'):
        """Open file 'f' from the working directory."""
        return self.wopener(f, mode)
174 174
175 175 def wread(self, filename):
176 176 if self.encodepats == None:
177 177 l = []
178 178 for pat, cmd in self.ui.configitems("encode"):
179 179 mf = util.matcher("", "/", [pat], [], [])[1]
180 180 l.append((mf, cmd))
181 181 self.encodepats = l
182 182
183 183 data = self.wopener(filename, 'r').read()
184 184
185 185 for mf, cmd in self.encodepats:
186 186 if mf(filename):
187 187 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
188 188 data = util.filter(data, cmd)
189 189 break
190 190
191 191 return data
192 192
193 193 def wwrite(self, filename, data, fd=None):
194 194 if self.decodepats == None:
195 195 l = []
196 196 for pat, cmd in self.ui.configitems("decode"):
197 197 mf = util.matcher("", "/", [pat], [], [])[1]
198 198 l.append((mf, cmd))
199 199 self.decodepats = l
200 200
201 201 for mf, cmd in self.decodepats:
202 202 if mf(filename):
203 203 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
204 204 data = util.filter(data, cmd)
205 205 break
206 206
207 207 if fd:
208 208 return fd.write(data)
209 209 return self.wopener(filename, 'w').write(data)
210 210
    def transaction(self):
        """Start a new transaction and return the transaction object.

        The current dirstate is saved alongside the journal so that
        'undo' can restore it; on successful close the journal files
        are renamed to 'undo' files.
        """
        # save dirstate for undo
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        def after():
            # on close, keep the journal around as undo data
            util.rename(self.join("journal"), self.join("undo"))
            util.rename(self.join("journal.dirstate"),
                        self.join("undo.dirstate"))

        return transaction.transaction(self.ui.warn, self.opener,
                                       self.join("journal"), after)
226 226
    def recover(self):
        """Roll back an interrupted transaction, if a journal exists.

        Returns True when a rollback was performed, False otherwise.
        """
        lock = self.lock()
        if os.path.exists(self.join("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.opener, self.join("journal"))
            # reload the revlogs, which the rollback may have truncated
            self.manifest = manifest.manifest(self.opener)
            self.changelog = changelog.changelog(self.opener)
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
238 238
    def undo(self):
        """Roll back the last committed transaction, if undo data
        exists, restoring the saved dirstate as well."""
        wlock = self.wlock()
        lock = self.lock()
        if os.path.exists(self.join("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.opener, self.join("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            self.dirstate.read()
        else:
            self.ui.warn(_("no undo information available\n"))
249 249
    def lock(self, wait=1):
        """Acquire the repository (store) lock.

        With a true 'wait', block until the lock can be taken;
        otherwise propagate lock.LockHeld immediately.
        """
        try:
            # first try a non-blocking acquire
            return lock.lock(self.join("lock"), 0)
        except lock.LockHeld, inst:
            if wait:
                self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
                return lock.lock(self.join("lock"), wait)
            raise inst
258 258
    def wlock(self, wait=1):
        """Acquire the working-directory lock.

        The dirstate is written back when the lock is released.  After
        waiting for another holder, the dirstate is re-read since that
        holder may have changed it.
        """
        try:
            # first try a non-blocking acquire
            wlock = lock.lock(self.join("wlock"), 0, self.dirstate.write)
        except lock.LockHeld, inst:
            if not wait:
                raise inst
            self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
            wlock = lock.lock(self.join("wlock"), wait, self.dirstate.write)
            self.dirstate.read()
        return wlock
269 269
    def rawcommit(self, files, text, user, date, p1=None, p2=None):
        """Commit the given files with the given metadata, without
        consulting the dirstate for the file list.

        p1/p2 default to the dirstate parents.  The dirstate is only
        updated when p1 matches the current working directory parent.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        # only track the commit in the dirstate when it is a child of
        # the current working directory parent
        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        wlock = self.wlock()
        lock = self.lock()
        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                fp1 = m1.get(f, nullid)
                fp2 = m2.get(f, nullid)

                # is the same revision on two branches of a merge?
                if fp2 == fp1:
                    fp2 = nullid

                if fp2 != nullid:
                    # is one parent an ancestor of the other?
                    fpa = r.ancestor(fp1, fp2)
                    if fpa == fp1:
                        fp1, fp2 = fp2, nullid
                    elif fpa == fp2:
                        fp2 = nullid

                # is the file unmodified from the parent?
                if t == r.read(fp1):
                    # record the proper existing parent in manifest
                    # no need to add a revision
                    mm[f] = fp1
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # file missing from the working directory: drop it
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
341 341
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False):
        """Commit working-directory changes and return the new
        changeset node, or None when nothing was committed.

        With 'files', commit exactly those files; otherwise commit
        everything reported by self.changes().  'force' allows a
        commit even when no files changed.
        """
        commit = []
        remove = []
        changed = []

        if files:
            # classify the explicitly-named files by dirstate state
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            modified, added, removed, deleted, unknown = self.changes(match=match)
            commit = modified + added
            remove = removed

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        # a merge (p2 set) is committed even with no file changes
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status(_("nothing changed\n"))
            return None

        if not self.hook("precommit"):
            return None

        wlock = self.wlock()
        lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wread(f)
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

            r = self.file(f)

            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                # record copy/rename metadata in the filelog entry
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
                fp1, fp2 = nullid, nullid
            else:
                fp1 = m1.get(f, nullid)
                fp2 = m2.get(f, nullid)

            if fp2 != nullid:
                # is one parent an ancestor of the other?
                fpa = r.ancestor(fp1, fp2)
                if fpa == fp1:
                    fp1, fp2 = fp2, nullid
                elif fpa == fp2:
                    fp2 = nullid

            # is the file unmodified from the parent?
            if not meta and t == r.read(fp1) and fp2 == nullid:
                # record the proper existing parent in manifest
                # no need to add a revision
                new[f] = fp1
                continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        if not text:
            # no message supplied: ask the user via their editor
            edittext = ""
            if p2 != nullid:
                edittext += "HG: branch merge\n"
            edittext += "\n" + "HG: manifest hash %s\n" % hex(mn)
            edittext += "".join(["HG: changed %s\n" % f for f in changed])
            edittext += "".join(["HG: removed %s\n" % f for f in remove])
            if not changed and not remove:
                edittext += "HG: no files changed\n"
            edittext = self.ui.edit(edittext)
            if not edittext.rstrip():
                # an empty commit message aborts the commit
                return None
            text = edittext

        user = user or self.ui.username()
        n = self.changelog.add(mn, changed, text, tr, p1, p2, user, date)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        if not self.hook("commit", node=hex(n)):
            return None
        return n
463 463
    def walk(self, node=None, files=[], match=util.always):
        """Yield (source, filename) pairs for files accepted by 'match'.

        With a node, walk that revision's manifest (source 'm') and
        warn about requested files absent from it; otherwise delegate
        to the dirstate walk.
        """
        if node:
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                fdict.pop(fn, None)
                if match(fn):
                    yield 'm', fn
            # anything left in fdict was asked for but not found
            for fn in fdict:
                self.ui.warn(_('%s: No such file in rev %s\n') % (
                    util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match):
                yield src, fn
477 477
    def changes(self, node1=None, node2=None, files=[], match=util.always):
        """return changes between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns sorted lists: (modified, added, removed, deleted,
        unknown).
        """

        def fcmp(fn, mf):
            # compare working-directory contents against the manifest
            t1 = self.wread(fn)
            t2 = self.file(fn).read(mf.get(fn, nullid))
            return cmp(t1, t2)

        def mfmatches(node):
            # manifest of 'node' restricted to files accepted by 'match'
            change = self.changelog.read(node)
            mf = dict(self.manifest.read(change[0]))
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        # are we comparing the working directory?
        if not node2:
            try:
                wlock = self.wlock(wait=0)
            except lock.LockHeld:
                # without the lock, dirstate updates below are skipped
                wlock = None
            lookup, modified, added, removed, deleted, unknown = (
                self.dirstate.changes(files, match))

            # are we comparing working dir against its parent?
            if not node1:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        elif wlock is not None:
                            # contents unchanged: mark clean in dirstate
                            self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            deleted, unknown = [], []
            mf2 = mfmatches(node2)

        if node1:
            # flush lists from dirstate before comparing manifests
            modified, added = [], []

            mf1 = mfmatches(node1)

            for fn in mf2:
                if mf1.has_key(fn):
                    if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
                        modified.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever remains in mf1 vanished between the revisions
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown:
            l.sort()
        return (modified, added, removed, deleted, unknown)
551 551
    def add(self, list):
        """Schedule the given files for addition at the next commit."""
        wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if not os.path.exists(p):
                self.ui.warn(_("%s does not exist!\n") % f)
            elif not os.path.isfile(p):
                self.ui.warn(_("%s not added: only files supported currently\n")
                             % f)
            elif self.dirstate.state(f) in 'an':
                self.ui.warn(_("%s already tracked!\n") % f)
            else:
                self.dirstate.update([f], "a")
565 565
    def forget(self, list):
        """Undo a pending 'add' for the given files."""
        wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in 'ai':
                self.ui.warn(_("%s not added!\n") % f)
            else:
                self.dirstate.forget([f])
573 573
    def remove(self, list, unlink=False):
        """Schedule the given files for removal at the next commit.

        With unlink=True, also delete the files from the working
        directory first.
        """
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # an already-missing file is fine; anything else is not
                    if inst.errno != errno.ENOENT:
                        raise
        wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                # refuse to mark removed a file that still exists
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                # never committed: just drop the pending add
                self.ui.warn(_("%s never committed!\n") % f)
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")
594 594
    def undelete(self, list):
        """Restore files marked removed ('r'), recreating their
        contents and exec flags from the first dirstate parent."""
        p = self.dirstate.parents()[0]
        mn = self.changelog.read(p)[0]
        mf = self.manifest.readflags(mn)
        m = self.manifest.read(mn)
        wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in "r":
                self.ui.warn("%s not removed!\n" % f)
            else:
                t = self.file(f).read(m[f])
                self.wwrite(f, t)
                util.set_exec(self.wjoin(f), mf[f])
                self.dirstate.update([f], "n")
609 609
    def copy(self, source, dest):
        """Record that 'dest' is a copy of 'source' for the next commit."""
        p = self.wjoin(dest)
        if not os.path.exists(p):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not os.path.isfile(p):
            self.ui.warn(_("copy failed: %s is not a file\n") % dest)
        else:
            wlock = self.wlock()
            if self.dirstate.state(dest) == '?':
                # the target is untracked: schedule it for addition
                self.dirstate.update([dest], "a")
            self.dirstate.copy(source, dest)
621 621
622 622 def heads(self, start=None):
623 623 heads = self.changelog.heads(start)
624 624 # sort the output in rev descending order
625 625 heads = [(-self.changelog.rev(h), h) for h in heads]
626 626 heads.sort()
627 627 return [n for (r, n) in heads]
628 628
629 629 # branchlookup returns a dict giving a list of branches for
630 630 # each head. A branch is defined as the tag of a node or
631 631 # the branch of the node's parents. If a node has multiple
632 632 # branch tags, tags are eliminated if they are visible from other
633 633 # branch tags.
634 634 #
635 635 # So, for this graph: a->b->c->d->e
636 636 # \ /
637 637 # aa -----/
638 638 # a has tag 2.6.12
639 639 # d has tag 2.6.13
640 640 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
641 641 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
642 642 # from the list.
643 643 #
644 644 # It is possible that more than one head will have the same branch tag.
645 645 # callers need to check the result for multiple heads under the same
646 646 # branch tag if that is a problem for them (ie checkout of a specific
647 647 # branch).
648 648 #
649 649 # passing in a specific branch will limit the depth of the search
650 650 # through the parents. It won't limit the branches returned in the
651 651 # result though.
    def branchlookup(self, heads=None, branch=None):
        """Map each head to its visible branch tags (see the comment
        block above this method for the full algorithm description).

        Returns a dict {head: [tag, ...]}.
        """
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume a merge's second-parent line, inheriting the
                # tags found so far
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        # the tag is visible from everything found so far
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        # searched-for branch reached: stop this line
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    # queue the second parent of a merge for later
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # set of tag nodes reachable from 'node', memoized
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
734 734
735 735 def branches(self, nodes):
736 736 if not nodes:
737 737 nodes = [self.changelog.tip()]
738 738 b = []
739 739 for n in nodes:
740 740 t = n
741 741 while n:
742 742 p = self.changelog.parents(n)
743 743 if p[1] != nullid or p[0] == nullid:
744 744 b.append((t, n, p[0], p[1]))
745 745 break
746 746 n = p[0]
747 747 return b
748 748
749 749 def between(self, pairs):
750 750 r = []
751 751
752 752 for top, bottom in pairs:
753 753 n, l, i = top, [], 0
754 754 f = 1
755 755
756 756 while n != bottom:
757 757 p = self.changelog.parents(n)[0]
758 758 if i == f:
759 759 l.append(n)
760 760 f = f * 2
761 761 n = p
762 762 i += 1
763 763
764 764 r.append(l)
765 765
766 766 return r
767 767
    def findincoming(self, remote, base=None, heads=None):
        """Return the roots of changesets the remote has that we lack.

        'base' (if given) is filled in with nodes known to both sides;
        'heads' defaults to the remote's heads.  Returns None when the
        remote has nothing new.
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        if not heads:
            heads = remote.heads()

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            # every remote head is already known locally
            return None

        rep = {}
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid:
                    break
                if n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                if n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            base[n[2]] = 1 # latest known
                            continue

                    # queue this branch's parents for the next request
                    for a in n[2:4]:
                        if a not in rep:
                            r.append(a)
                            rep[a] = 1

                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                # batch branch requests ten at a time
                for p in range(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        if b[0] in m:
                            self.ui.debug(_("found base node %s\n")
                                          % short(b[0]))
                            base[b[0]] = 1
                        elif b[0] not in seen:
                            unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        # found the earliest unknown node on this branch
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            self.ui.warn(_("warning: pulling from an unrelated repository!\n"))

        self.ui.note(_("found new changesets starting at ") +
                     " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
889 889
    def findoutgoing(self, remote, base=None, heads=None):
        """Return the roots of changesets we have that the remote lacks
        (the roots of what a push would send)."""
        if base == None:
            # no common-node map supplied: compute one via findincoming
            base = {}
            self.findincoming(remote, base, heads)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)

        # this is the set of all roots we have to push
        return subset
919 919
    def pull(self, remote, heads=None):
        """Pull changesets (optionally limited to 'heads') from the
        remote repository.  Returns 1 when there is nothing to pull."""
        lock = self.lock()

        # if we have an empty repo, fetch everything
        if self.changelog.tip() == nullid:
            self.ui.status(_("requesting all changes\n"))
            fetch = [nullid]
        else:
            fetch = self.findincoming(remote)

        if not fetch:
            self.ui.status(_("no changes found\n"))
            return 1

        if heads is None:
            cg = remote.changegroup(fetch)
        else:
            cg = remote.changegroupsubset(fetch, heads)
        return self.addchangegroup(cg)
939 939
    def push(self, remote, force=False):
        """Push local changesets to the remote repository.

        Refuses (returning 1) when the remote has unsynced changes or
        the push would create new remote heads, unless 'force' is set.
        """
        lock = remote.lock()

        base = {}
        heads = remote.heads()
        inc = self.findincoming(remote, base, heads)
        if not force and inc:
            self.ui.warn(_("abort: unsynced remote changes!\n"))
            self.ui.status(_("(did you forget to sync? use push -f to force)\n"))
            return 1

        update = self.findoutgoing(remote, base)
        if not update:
            self.ui.status(_("no changes found\n"))
            return 1
        elif not force:
            # more local heads than remote heads means new branches
            if len(heads) < len(self.changelog.heads()):
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return 1

        cg = self.changegroup(update)
        return remote.addchangegroup(cg)
964 964
965 965 def changegroupsubset(self, bases, heads):
966 966 """This function generates a changegroup consisting of all the nodes
967 967 that are descendents of any of the bases, and ancestors of any of
968 968 the heads.
969 969
970 970 It is fairly complex as determining which filenodes and which
971 971 manifest nodes need to be included for the changeset to be complete
972 972 is non-trivial.
973 973
974 974 Another wrinkle is doing the reverse, figuring out which changeset in
975 975 the changegroup a particular filenode or manifestnode belongs to."""
976 976
977 977 # Set up some initial variables
978 978 # Make it easy to refer to self.changelog
979 979 cl = self.changelog
980 980 # msng is short for missing - compute the list of changesets in this
981 981 # changegroup.
982 982 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
983 983 # Some bases may turn out to be superfluous, and some heads may be
984 984 # too. nodesbetween will return the minimal set of bases and heads
985 985 # necessary to re-create the changegroup.
986 986
987 987 # Known heads are the list of heads that it is assumed the recipient
988 988 # of this changegroup will know about.
989 989 knownheads = {}
990 990 # We assume that all parents of bases are known heads.
991 991 for n in bases:
992 992 for p in cl.parents(n):
993 993 if p != nullid:
994 994 knownheads[p] = 1
995 995 knownheads = knownheads.keys()
996 996 if knownheads:
997 997 # Now that we know what heads are known, we can compute which
998 998 # changesets are known. The recipient must know about all
999 999 # changesets required to reach the known heads from the null
1000 1000 # changeset.
1001 1001 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1002 1002 junk = None
1003 1003 # Transform the list into an ersatz set.
1004 1004 has_cl_set = dict.fromkeys(has_cl_set)
1005 1005 else:
1006 1006 # If there were no known heads, the recipient cannot be assumed to
1007 1007 # know about any changesets.
1008 1008 has_cl_set = {}
1009 1009
1010 1010 # Make it easy to refer to self.manifest
1011 1011 mnfst = self.manifest
1012 1012 # We don't know which manifests are missing yet
1013 1013 msng_mnfst_set = {}
1014 1014 # Nor do we know which filenodes are missing.
1015 1015 msng_filenode_set = {}
1016 1016
1017 1017 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1018 1018 junk = None
1019 1019
1020 1020 # A changeset always belongs to itself, so the changenode lookup
1021 1021 # function for a changenode is identity.
1022 1022 def identity(x):
1023 1023 return x
1024 1024
1025 1025 # A function generating function. Sets up an environment for the
1026 1026 # inner function.
1027 1027 def cmp_by_rev_func(revlog):
1028 1028 # Compare two nodes by their revision number in the environment's
1029 1029 # revision history. Since the revision number both represents the
1030 1030 # most efficient order to read the nodes in, and represents a
1031 1031 # topological sorting of the nodes, this function is often useful.
1032 1032 def cmp_by_rev(a, b):
1033 1033 return cmp(revlog.rev(a), revlog.rev(b))
1034 1034 return cmp_by_rev
1035 1035
1036 1036 # If we determine that a particular file or manifest node must be a
1037 1037 # node that the recipient of the changegroup will already have, we can
1038 1038 # also assume the recipient will have all the parents. This function
1039 1039 # prunes them from the set of missing nodes.
1040 1040 def prune_parents(revlog, hasset, msngset):
1041 1041 haslst = hasset.keys()
1042 1042 haslst.sort(cmp_by_rev_func(revlog))
1043 1043 for node in haslst:
1044 1044 parentlst = [p for p in revlog.parents(node) if p != nullid]
1045 1045 while parentlst:
1046 1046 n = parentlst.pop()
1047 1047 if n not in hasset:
1048 1048 hasset[n] = 1
1049 1049 p = [p for p in revlog.parents(n) if p != nullid]
1050 1050 parentlst.extend(p)
1051 1051 for n in hasset:
1052 1052 msngset.pop(n, None)
1053 1053
1054 1054 # This is a function generating function used to set up an environment
1055 1055 # for the inner function to execute in.
1056 1056 def manifest_and_file_collector(changedfileset):
1057 1057 # This is an information gathering function that gathers
1058 1058 # information from each changeset node that goes out as part of
1059 1059 # the changegroup. The information gathered is a list of which
1060 1060 # manifest nodes are potentially required (the recipient may
1061 1061 # already have them) and total list of all files which were
1062 1062 # changed in any changeset in the changegroup.
1063 1063 #
1064 1064 # We also remember the first changenode we saw any manifest
1065 1065 # referenced by so we can later determine which changenode 'owns'
1066 1066 # the manifest.
1067 1067 def collect_manifests_and_files(clnode):
1068 1068 c = cl.read(clnode)
1069 1069 for f in c[3]:
1070 1070 # This is to make sure we only have one instance of each
1071 1071 # filename string for each filename.
1072 1072 changedfileset.setdefault(f, f)
1073 1073 msng_mnfst_set.setdefault(c[0], clnode)
1074 1074 return collect_manifests_and_files
1075 1075
1076 1076 # Figure out which manifest nodes (of the ones we think might be part
1077 1077 # of the changegroup) the recipient must know about and remove them
1078 1078 # from the changegroup.
1079 1079 def prune_manifests():
1080 1080 has_mnfst_set = {}
1081 1081 for n in msng_mnfst_set:
1082 1082 # If a 'missing' manifest thinks it belongs to a changenode
1083 1083 # the recipient is assumed to have, obviously the recipient
1084 1084 # must have that manifest.
1085 1085 linknode = cl.node(mnfst.linkrev(n))
1086 1086 if linknode in has_cl_set:
1087 1087 has_mnfst_set[n] = 1
1088 1088 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1089 1089
1090 1090 # Use the information collected in collect_manifests_and_files to say
1091 1091 # which changenode any manifestnode belongs to.
1092 1092 def lookup_manifest_link(mnfstnode):
1093 1093 return msng_mnfst_set[mnfstnode]
1094 1094
1095 1095 # A function generating function that sets up the initial environment
1096 1096 # the inner function.
1097 1097 def filenode_collector(changedfiles):
1098 1098 next_rev = [0]
1099 1099 # This gathers information from each manifestnode included in the
1100 1100 # changegroup about which filenodes the manifest node references
1101 1101 # so we can include those in the changegroup too.
1102 1102 #
1103 1103 # It also remembers which changenode each filenode belongs to. It
1104 1104 # does this by assuming the a filenode belongs to the changenode
1105 1105 # the first manifest that references it belongs to.
1106 1106 def collect_msng_filenodes(mnfstnode):
1107 1107 r = mnfst.rev(mnfstnode)
1108 1108 if r == next_rev[0]:
1109 1109 # If the last rev we looked at was the one just previous,
1110 1110 # we only need to see a diff.
1111 1111 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1112 1112 # For each line in the delta
1113 1113 for dline in delta.splitlines():
1114 1114 # get the filename and filenode for that line
1115 1115 f, fnode = dline.split('\0')
1116 1116 fnode = bin(fnode[:40])
1117 1117 f = changedfiles.get(f, None)
1118 1118 # And if the file is in the list of files we care
1119 1119 # about.
1120 1120 if f is not None:
1121 1121 # Get the changenode this manifest belongs to
1122 1122 clnode = msng_mnfst_set[mnfstnode]
1123 1123 # Create the set of filenodes for the file if
1124 1124 # there isn't one already.
1125 1125 ndset = msng_filenode_set.setdefault(f, {})
1126 1126 # And set the filenode's changelog node to the
1127 1127 # manifest's if it hasn't been set already.
1128 1128 ndset.setdefault(fnode, clnode)
1129 1129 else:
1130 1130 # Otherwise we need a full manifest.
1131 1131 m = mnfst.read(mnfstnode)
1132 1132 # For every file in we care about.
1133 1133 for f in changedfiles:
1134 1134 fnode = m.get(f, None)
1135 1135 # If it's in the manifest
1136 1136 if fnode is not None:
1137 1137 # See comments above.
1138 1138 clnode = msng_mnfst_set[mnfstnode]
1139 1139 ndset = msng_filenode_set.setdefault(f, {})
1140 1140 ndset.setdefault(fnode, clnode)
1141 1141 # Remember the revision we hope to see next.
1142 1142 next_rev[0] = r + 1
1143 1143 return collect_msng_filenodes
1144 1144
1145 1145 # We have a list of filenodes we think we need for a file, lets remove
1146 1146 # all those we now the recipient must have.
1147 1147 def prune_filenodes(f, filerevlog):
1148 1148 msngset = msng_filenode_set[f]
1149 1149 hasset = {}
1150 1150 # If a 'missing' filenode thinks it belongs to a changenode we
1151 1151 # assume the recipient must have, then the recipient must have
1152 1152 # that filenode.
1153 1153 for n in msngset:
1154 1154 clnode = cl.node(filerevlog.linkrev(n))
1155 1155 if clnode in has_cl_set:
1156 1156 hasset[n] = 1
1157 1157 prune_parents(filerevlog, hasset, msngset)
1158 1158
1159 1159 # A function generator function that sets up the a context for the
1160 1160 # inner function.
1161 1161 def lookup_filenode_link_func(fname):
1162 1162 msngset = msng_filenode_set[fname]
1163 1163 # Lookup the changenode the filenode belongs to.
1164 1164 def lookup_filenode_link(fnode):
1165 1165 return msngset[fnode]
1166 1166 return lookup_filenode_link
1167 1167
1168 1168 # Now that we have all theses utility functions to help out and
1169 1169 # logically divide up the task, generate the group.
1170 1170 def gengroup():
1171 1171 # The set of changed files starts empty.
1172 1172 changedfiles = {}
1173 1173 # Create a changenode group generator that will call our functions
1174 1174 # back to lookup the owning changenode and collect information.
1175 1175 group = cl.group(msng_cl_lst, identity,
1176 1176 manifest_and_file_collector(changedfiles))
1177 1177 for chnk in group:
1178 1178 yield chnk
1179 1179
1180 1180 # The list of manifests has been collected by the generator
1181 1181 # calling our functions back.
1182 1182 prune_manifests()
1183 1183 msng_mnfst_lst = msng_mnfst_set.keys()
1184 1184 # Sort the manifestnodes by revision number.
1185 1185 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1186 1186 # Create a generator for the manifestnodes that calls our lookup
1187 1187 # and data collection functions back.
1188 1188 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1189 1189 filenode_collector(changedfiles))
1190 1190 for chnk in group:
1191 1191 yield chnk
1192 1192
1193 1193 # These are no longer needed, dereference and toss the memory for
1194 1194 # them.
1195 1195 msng_mnfst_lst = None
1196 1196 msng_mnfst_set.clear()
1197 1197
1198 1198 changedfiles = changedfiles.keys()
1199 1199 changedfiles.sort()
1200 1200 # Go through all our files in order sorted by name.
1201 1201 for fname in changedfiles:
1202 1202 filerevlog = self.file(fname)
1203 1203 # Toss out the filenodes that the recipient isn't really
1204 1204 # missing.
1205 1205 prune_filenodes(fname, filerevlog)
1206 1206 msng_filenode_lst = msng_filenode_set[fname].keys()
1207 1207 # If any filenodes are left, generate the group for them,
1208 1208 # otherwise don't bother.
1209 1209 if len(msng_filenode_lst) > 0:
1210 1210 yield struct.pack(">l", len(fname) + 4) + fname
1211 1211 # Sort the filenodes by their revision #
1212 1212 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1213 1213 # Create a group generator and only pass in a changenode
1214 1214 # lookup function as we need to collect no information
1215 1215 # from filenodes.
1216 1216 group = filerevlog.group(msng_filenode_lst,
1217 1217 lookup_filenode_link_func(fname))
1218 1218 for chnk in group:
1219 1219 yield chnk
1220 1220 # Don't need this anymore, toss it to free memory.
1221 1221 del msng_filenode_set[fname]
1222 1222 # Signal that no more groups are left.
1223 1223 yield struct.pack(">l", 0)
1224 1224
1225 1225 return util.chunkbuffer(gengroup())
1226 1226
    def changegroup(self, basenodes):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""
        cl = self.changelog
        # Every changeset descending from basenodes is assumed missing on
        # the remote side and goes into the group.
        nodes = cl.nodesbetween(basenodes, None)[0]
        # Ersatz set of the changelog revision numbers being sent; used to
        # select manifest/file revisions by their linkrev below.
        revset = dict.fromkeys([cl.rev(n) for n in nodes])

        # A changeset is its own link target, so its lookup is identity.
        def identity(x):
            return x

        # Yield, in revision order, the nodes of revlog whose linked
        # changeset is part of this changegroup.
        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        # Build a callback that records into changedfileset every file
        # touched by a changeset, as the changelog group is generated.
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # Build a lookup that maps a revlog node to the changeset node
        # that introduced it (via the revlog's linkrev).
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        # Stream the group: changelog chunks, then manifest chunks, then
        # one name-prefixed section per changed file, then a zero chunk.
        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                # Only emit a section for files that actually have
                # revisions to send.
                if nodeiter:
                    # Section header: 4-byte big-endian length (counting
                    # the length field itself) followed by the file name.
                    yield struct.pack(">l", len(fname) + 4) + fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # A zero-length chunk marks the end of the changegroup.
            yield struct.pack(">l", 0)

        return util.chunkbuffer(gengroup())
1286 1286
    def addchangegroup(self, source):
        """Apply a changegroup read from the file-like object source.

        Adds the changesets, manifests and file revisions it contains to
        the repository within a single transaction, prints progress via
        self.ui, and fires the 'changegroup' hook plus one 'commit' hook
        per added changeset.  Returns 1 if the changegroup hook reports
        failure, None otherwise.
        """

        # Read one length-prefixed chunk from source.  Returns "" at end
        # of stream or on a terminator chunk (declared length <= 4).
        def getchunk():
            d = source.read(4)
            if not d:
                return ""
            l = struct.unpack(">l", d)[0]
            if l <= 4:
                return ""
            d = source.read(l - 4)
            if len(d) < l - 4:
                raise repo.RepoError(_("premature EOF reading chunk"
                                       " (got %d bytes, expected %d)")
                                     % (len(d), l - 4))
            return d

        # Yield chunks until the current group's terminator is reached.
        def getgroup():
            while 1:
                c = getchunk()
                if not c:
                    break
                yield c

        # linkrev callback for changelog entries: each new changeset links
        # to the next changelog revision number (also logs progress).
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return self.changelog.count()

        # linkrev callback for manifest/file entries: the revision of the
        # changeset each entry belongs to.
        def revmap(x):
            return self.changelog.rev(x)

        if not source:
            return
        changesets = files = revisions = 0

        tr = self.transaction()

        # Remember the head count so we can report new heads afterwards.
        oldheads = len(self.changelog.heads())

        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        co = self.changelog.tip()
        cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
        cnr, cor = map(self.changelog.rev, (cn, co))
        if cn == nullid:
            # Empty group: treat as "nothing added".
            cnr = cor
        changesets = cnr - cor

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        mm = self.manifest.tip()
        mo = self.manifest.addgroup(getgroup(), revmap, tr)

        # process the files
        self.ui.status(_("adding file changes\n"))
        while 1:
            # Each file section starts with a chunk holding its name; an
            # empty chunk ends the changegroup.
            f = getchunk()
            if not f:
                break
            self.ui.debug(_("adding %s revisions\n") % f)
            fl = self.file(f)
            o = fl.count()
            n = fl.addgroup(getgroup(), revmap, tr)
            revisions += fl.count() - o
            files += 1

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads > oldheads:
            heads = _(" (+%d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        tr.close()

        if changesets > 0:
            # Run the changegroup hook on the first new changeset; a
            # failing hook aborts with status 1.
            if not self.hook("changegroup",
                             node=hex(self.changelog.node(cor+1))):
                self.ui.warn(_("abort: changegroup hook returned failure!\n"))
                return 1

            for i in range(cor + 1, cnr + 1):
                self.hook("commit", node=hex(self.changelog.node(i)))

        return
1373 1373
    def update(self, node, allow=False, force=False, choose=None,
               moddirstate=True, forcemerge=False):
        """Update the working directory to changeset node.

        allow permits an update that merges across branches; force
        overrides safety checks and clobbers local state; choose, if
        given, is a predicate limiting which files are touched;
        moddirstate=False performs the file operations without writing
        the dirstate; forcemerge skips the uncommitted-changes check.
        Returns 1 on refusal (outstanding merge, or branch-spanning
        update without allow/force), None otherwise.
        """
        pl = self.dirstate.parents()
        if not force and pl[1] != nullid:
            self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
            return 1

        # p1 = current working dir parent, p2 = update target,
        # pa = their common ancestor changeset.
        p1, p2 = pl[0], node
        pa = self.changelog.ancestor(p1, p2)
        m1n = self.changelog.read(p1)[0]
        m2n = self.changelog.read(p2)[0]
        man = self.manifest.ancestor(m1n, m2n)
        m1 = self.manifest.read(m1n)
        mf1 = self.manifest.readflags(m1n)
        m2 = self.manifest.read(m2n)
        mf2 = self.manifest.readflags(m2n)
        ma = self.manifest.read(man)
        mfa = self.manifest.readflags(man)

        modified, added, removed, deleted, unknown = self.changes()

        if allow and not forcemerge:
            if modified or added or removed:
                raise util.Abort(_("outstanding uncommited changes"))
        if not forcemerge and not force:
            # Refuse to overwrite an untracked file whose content differs
            # from the version in the target revision.
            for f in unknown:
                if f in m2:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) != 0:
                        raise util.Abort(_("'%s' already exists in the working"
                                           " dir and differs from remote") % f)

        # is this a jump, or a merge?  i.e. is there a linear path
        # from p1 to p2?
        linear_path = (pa == p1 or pa == p2)

        # resolve the manifest to determine which files
        # we care about merging
        self.ui.note(_("resolving manifests\n"))
        self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
                      (force, allow, moddirstate, linear_path))
        self.ui.debug(_(" ancestor %s local %s remote %s\n") %
                      (short(man), short(m1n), short(m2n)))

        # merge: files needing a 3-way merge; get: files to fetch from
        # the target revision; remove: files to delete from the wd.
        merge = {}
        get = {}
        remove = []

        # construct a working dir manifest
        mw = m1.copy()
        mfw = mf1.copy()
        umap = dict.fromkeys(unknown)

        for f in added + modified + unknown:
            mw[f] = ""
            mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

        if moddirstate:
            wlock = self.wlock()

        for f in deleted + removed:
            if f in mw:
                del mw[f]

            # If we're jumping between revisions (as opposed to merging),
            # and if neither the working directory nor the target rev has
            # the file, then we need to remove it from the dirstate, to
            # prevent the dirstate from listing the file when it is no
            # longer in the manifest.
            if moddirstate and linear_path and f not in m2:
                self.dirstate.forget((f,))

        # Compare manifests
        for f, n in mw.iteritems():
            if choose and not choose(f):
                continue
            if f in m2:
                s = 0

                # is the wfile new since m1, and match m2?
                if f not in m1:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) == 0:
                        n = m2[f]
                    del t1, t2

                # are files different?
                if n != m2[f]:
                    a = ma.get(f, nullid)
                    # are both different from the ancestor?
                    if n != a and m2[f] != a:
                        self.ui.debug(_(" %s versions differ, resolve\n") % f)
                        # merge executable bits
                        # "if we changed or they changed, change in merge"
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        merge[f] = (m1.get(f, nullid), m2[f], mode)
                        s = 1
                    # are we clobbering?
                    # is remote's version newer?
                    # or are we going back in time?
                    elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                        self.ui.debug(_(" remote %s is newer, get\n") % f)
                        get[f] = m2[f]
                        s = 1
                elif f in umap:
                    # this unknown file is the same as the checkout
                    get[f] = m2[f]

                if not s and mfw[f] != mf2[f]:
                    if force:
                        self.ui.debug(_(" updating permissions for %s\n") % f)
                        util.set_exec(self.wjoin(f), mf2[f])
                    else:
                        # Merge the executable bit the same way as above.
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        if mode != b:
                            self.ui.debug(_(" updating permissions for %s\n")
                                          % f)
                            util.set_exec(self.wjoin(f), mode)
                del m2[f]
            elif f in ma:
                # Present locally and in the ancestor, but the target
                # revision deleted it.
                if n != ma[f]:
                    r = _("d")
                    if not force and (linear_path or allow):
                        r = self.ui.prompt(
                            (_(" local changed %s which remote deleted\n") % f) +
                            _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                    if r == _("d"):
                        remove.append(f)
                else:
                    self.ui.debug(_("other deleted %s\n") % f)
                    remove.append(f) # other deleted it
            else:
                # file is created on branch or in working directory
                if force and f not in umap:
                    self.ui.debug(_("remote deleted %s, clobbering\n") % f)
                    remove.append(f)
                elif n == m1.get(f, nullid): # same as parent
                    if p2 == pa: # going backwards?
                        self.ui.debug(_("remote deleted %s\n") % f)
                        remove.append(f)
                    else:
                        self.ui.debug(_("local modified %s, keeping\n") % f)
                else:
                    self.ui.debug(_("working dir created %s, keeping\n") % f)

        # Files remaining in m2 exist in the target but not the working
        # dir manifest.
        for f, n in m2.iteritems():
            if choose and not choose(f):
                continue
            if f[0] == "/":
                continue
            if f in ma and n != ma[f]:
                r = _("k")
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        (_("remote changed %s which local deleted\n") % f) +
                        _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                if r == _("k"):
                    get[f] = n
            elif f not in ma:
                self.ui.debug(_("remote created %s\n") % f)
                get[f] = n
            else:
                if force or p2 == pa: # going backwards?
                    self.ui.debug(_("local deleted %s, recreating\n") % f)
                    get[f] = n
                else:
                    self.ui.debug(_("local deleted %s\n") % f)

        del mw, m1, m2, ma

        if force:
            # Forced update: fetch the remote side of would-be merges.
            for f in merge:
                get[f] = merge[f][1]
            merge = {}

        if linear_path or force:
            # we don't need to do any magic, just jump to the new rev
            branch_merge = False
            p1, p2 = p2, nullid
        else:
            if not allow:
                self.ui.status(_("this update spans a branch"
                                 " affecting the following files:\n"))
                fl = merge.keys() + get.keys()
                fl.sort()
                for f in fl:
                    cf = ""
                    if f in merge:
                        cf = _(" (resolve)")
                    self.ui.status(" %s%s\n" % (f, cf))
                self.ui.warn(_("aborting update spanning branches!\n"))
                self.ui.status(_("(use update -m to merge across branches"
                                 " or -C to lose changes)\n"))
                return 1
            branch_merge = True

        # get the files we don't need to change
        files = get.keys()
        files.sort()
        for f in files:
            if f[0] == "/":
                continue
            self.ui.note(_("getting %s\n") % f)
            t = self.file(f).read(get[f])
            self.wwrite(f, t)
            util.set_exec(self.wjoin(f), mf2[f])
            if moddirstate:
                if branch_merge:
                    self.dirstate.update([f], 'n', st_mtime=-1)
                else:
                    self.dirstate.update([f], 'n')

        # merge the tricky bits
        files = merge.keys()
        files.sort()
        for f in files:
            self.ui.status(_("merging %s\n") % f)
            my, other, flag = merge[f]
            self.merge3(f, my, other)
            util.set_exec(self.wjoin(f), flag)
            if moddirstate:
                if branch_merge:
                    # We've done a branch merge, mark this file as merged
                    # so that we properly record the merger later
                    self.dirstate.update([f], 'm')
                else:
                    # We've update-merged a locally modified file, so
                    # we set the dirstate to emulate a normal checkout
                    # of that file some time in the past. Thus our
                    # merge will appear as a normal local file
                    # modification.
                    f_len = len(self.file(f).read(other))
                    self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)

        remove.sort()
        for f in remove:
            self.ui.note(_("removing %s\n") % f)
            try:
                util.unlink(self.wjoin(f))
            except OSError, inst:
                # A file already gone is fine; anything else is reported.
                if inst.errno != errno.ENOENT:
                    self.ui.warn(_("update failed to remove %s: %s!\n") %
                                 (f, inst.strerror))
        if moddirstate:
            if branch_merge:
                self.dirstate.update(remove, 'r')
            else:
                self.dirstate.forget(remove)

        if moddirstate:
            self.dirstate.setparents(p1, p2)
1629 1629
1630 1630 def merge3(self, fn, my, other):
1631 1631 """perform a 3-way merge in the working directory"""
1632 1632
1633 1633 def temp(prefix, node):
1634 1634 pre = "%s~%s." % (os.path.basename(fn), prefix)
1635 1635 (fd, name) = tempfile.mkstemp("", pre)
1636 1636 f = os.fdopen(fd, "wb")
1637 1637 self.wwrite(fn, fl.read(node), f)
1638 1638 f.close()
1639 1639 return name
1640 1640
1641 1641 fl = self.file(fn)
1642 1642 base = fl.ancestor(my, other)
1643 1643 a = self.wjoin(fn)
1644 1644 b = temp("base", base)
1645 1645 c = temp("other", other)
1646 1646
1647 1647 self.ui.note(_("resolving %s\n") % fn)
1648 1648 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1649 1649 (fn, short(my), short(other), short(base)))
1650 1650
1651 1651 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1652 1652 or "hgmerge")
1653 1653 r = os.system('%s "%s" "%s" "%s"' % (cmd, a, b, c))
1654 1654 if r:
1655 1655 self.ui.warn(_("merging %s failed!\n") % fn)
1656 1656
1657 1657 os.unlink(b)
1658 1658 os.unlink(c)
1659 1659
1660 1660 def verify(self):
1661 1661 filelinkrevs = {}
1662 1662 filenodes = {}
1663 1663 changesets = revisions = files = 0
1664 1664 errors = [0]
1665 1665 neededmanifests = {}
1666 1666
1667 1667 def err(msg):
1668 1668 self.ui.warn(msg + "\n")
1669 1669 errors[0] += 1
1670 1670
1671 1671 seen = {}
1672 1672 self.ui.status(_("checking changesets\n"))
1673 1673 d = self.changelog.checksize()
1674 1674 if d:
1675 1675 err(_("changeset data short %d bytes") % d)
1676 1676 for i in range(self.changelog.count()):
1677 1677 changesets += 1
1678 1678 n = self.changelog.node(i)
1679 1679 l = self.changelog.linkrev(n)
1680 1680 if l != i:
1681 1681 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1682 1682 if n in seen:
1683 1683 err(_("duplicate changeset at revision %d") % i)
1684 1684 seen[n] = 1
1685 1685
1686 1686 for p in self.changelog.parents(n):
1687 1687 if p not in self.changelog.nodemap:
1688 1688 err(_("changeset %s has unknown parent %s") %
1689 1689 (short(n), short(p)))
1690 1690 try:
1691 1691 changes = self.changelog.read(n)
1692 1692 except KeyboardInterrupt:
1693 1693 self.ui.warn(_("interrupted"))
1694 1694 raise
1695 1695 except Exception, inst:
1696 1696 err(_("unpacking changeset %s: %s") % (short(n), inst))
1697 1697
1698 1698 neededmanifests[changes[0]] = n
1699 1699
1700 1700 for f in changes[3]:
1701 1701 filelinkrevs.setdefault(f, []).append(i)
1702 1702
1703 1703 seen = {}
1704 1704 self.ui.status(_("checking manifests\n"))
1705 1705 d = self.manifest.checksize()
1706 1706 if d:
1707 1707 err(_("manifest data short %d bytes") % d)
1708 1708 for i in range(self.manifest.count()):
1709 1709 n = self.manifest.node(i)
1710 1710 l = self.manifest.linkrev(n)
1711 1711
1712 1712 if l < 0 or l >= self.changelog.count():
1713 1713 err(_("bad manifest link (%d) at revision %d") % (l, i))
1714 1714
1715 1715 if n in neededmanifests:
1716 1716 del neededmanifests[n]
1717 1717
1718 1718 if n in seen:
1719 1719 err(_("duplicate manifest at revision %d") % i)
1720 1720
1721 1721 seen[n] = 1
1722 1722
1723 1723 for p in self.manifest.parents(n):
1724 1724 if p not in self.manifest.nodemap:
1725 1725 err(_("manifest %s has unknown parent %s") %
1726 1726 (short(n), short(p)))
1727 1727
1728 1728 try:
1729 1729 delta = mdiff.patchtext(self.manifest.delta(n))
1730 1730 except KeyboardInterrupt:
1731 1731 self.ui.warn(_("interrupted"))
1732 1732 raise
1733 1733 except Exception, inst:
1734 1734 err(_("unpacking manifest %s: %s") % (short(n), inst))
1735 1735
1736 1736 ff = [ l.split('\0') for l in delta.splitlines() ]
1737 1737 for f, fn in ff:
1738 1738 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1739 1739
1740 1740 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1741 1741
1742 1742 for m, c in neededmanifests.items():
1743 1743 err(_("Changeset %s refers to unknown manifest %s") %
1744 1744 (short(m), short(c)))
1745 1745 del neededmanifests
1746 1746
1747 1747 for f in filenodes:
1748 1748 if f not in filelinkrevs:
1749 1749 err(_("file %s in manifest but not in changesets") % f)
1750 1750
1751 1751 for f in filelinkrevs:
1752 1752 if f not in filenodes:
1753 1753 err(_("file %s in changeset but not in manifest") % f)
1754 1754
1755 1755 self.ui.status(_("checking files\n"))
1756 1756 ff = filenodes.keys()
1757 1757 ff.sort()
1758 1758 for f in ff:
1759 1759 if f == "/dev/null":
1760 1760 continue
1761 1761 files += 1
1762 1762 fl = self.file(f)
1763 1763 d = fl.checksize()
1764 1764 if d:
1765 1765 err(_("%s file data short %d bytes") % (f, d))
1766 1766
1767 1767 nodes = {nullid: 1}
1768 1768 seen = {}
1769 1769 for i in range(fl.count()):
1770 1770 revisions += 1
1771 1771 n = fl.node(i)
1772 1772
1773 1773 if n in seen:
1774 1774 err(_("%s: duplicate revision %d") % (f, i))
1775 1775 if n not in filenodes[f]:
1776 1776 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1777 1777 else:
1778 1778 del filenodes[f][n]
1779 1779
1780 1780 flr = fl.linkrev(n)
1781 1781 if flr not in filelinkrevs[f]:
1782 1782 err(_("%s:%s points to unexpected changeset %d")
1783 1783 % (f, short(n), flr))
1784 1784 else:
1785 1785 filelinkrevs[f].remove(flr)
1786 1786
1787 1787 # verify contents
1788 1788 try:
1789 1789 t = fl.read(n)
1790 1790 except KeyboardInterrupt:
1791 1791 self.ui.warn(_("interrupted"))
1792 1792 raise
1793 1793 except Exception, inst:
1794 1794 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1795 1795
1796 1796 # verify parents
1797 1797 (p1, p2) = fl.parents(n)
1798 1798 if p1 not in nodes:
1799 1799 err(_("file %s:%s unknown parent 1 %s") %
1800 1800 (f, short(n), short(p1)))
1801 1801 if p2 not in nodes:
1802 1802 err(_("file %s:%s unknown parent 2 %s") %
1803 1803 (f, short(n), short(p1)))
1804 1804 nodes[n] = 1
1805 1805
1806 1806 # cross-check
1807 1807 for node in filenodes[f]:
1808 1808 err(_("node %s in manifests not in %s") % (hex(node), f))
1809 1809
1810 1810 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1811 1811 (files, changesets, revisions))
1812 1812
1813 1813 if errors[0]:
1814 1814 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1815 1815 return 1
General Comments 0
You need to be logged in to leave comments. Login now