##// END OF EJS Templates
Optimizing manifest reads in changegroupsubset by using deltas.
Eric Hopper -
r1462:12a8d772 default
parent child Browse files
Show More
@@ -1,1569 +1,1585
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import struct, os, util
9 9 import filelog, manifest, changelog, dirstate, repo
10 10 from node import *
11 11 from demandload import *
12 12 demandload(globals(), "re lock transaction tempfile stat mdiff errno")
13 13
14 14 class localrepository:
    def __init__(self, ui, path=None, create=0):
        """Open (or create) the repository at path.

        If path is None, walk upward from the current directory until a
        directory containing ".hg" is found; raise repo.RepoError when
        none exists.  With create=1, make the .hg skeleton.
        """
        if not path:
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                # dirname of the filesystem root returns it unchanged
                if p == oldp: raise repo.RepoError("no repo found")
            path = p
        self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise repo.RepoError("repository %s not found" % self.path)

        self.root = os.path.abspath(path)
        self.ui = ui
        self.opener = util.opener(self.path)    # opens files under .hg
        self.wopener = util.opener(self.root)   # opens working-dir files
        self.manifest = manifest.manifest(self.opener)
        self.changelog = changelog.changelog(self.opener)
        # lazily-filled caches; see tags() and nodetags()
        self.tagscache = None
        self.nodetagscache = None
        # lazily-built filter lists; see wread() and wwrite()
        self.encodepats = None
        self.decodepats = None

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.dirstate = dirstate.dirstate(self.opener, ui, self.root)
        try:
            # per-repository configuration overrides, if present
            self.ui.readconfig(self.opener("hgrc"))
        except IOError: pass
47 47
    def hook(self, name, **args):
        """Run the shell command configured under [hooks] as 'name'.

        Keyword args are exported to the hook as uppercase environment
        variables.  Returns False if the hook exits non-zero, True
        otherwise (including when no hook is configured).
        """
        s = self.ui.config("hooks", name)
        if s:
            self.ui.note("running hook %s: %s\n" % (name, s))
            # export args into the environment, remembering prior values
            old = {}
            for k, v in args.items():
                k = k.upper()
                old[k] = os.environ.get(k, None)
                os.environ[k] = v

            # Hooks run in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            r = os.system(s)
            os.chdir(olddir)

            # restore the environment variables we clobbered above
            for k, v in old.items():
                if v != None:
                    os.environ[k] = v
                else:
                    del os.environ[k]

            if r:
                self.ui.warn("abort: %s hook failed with status %d!\n" %
                             (name, r))
                return False
        return True
75 75
    def tags(self):
        '''return a mapping of tag to node'''
        if not self.tagscache:
            self.tagscache = {}
            def addtag(self, k, n):
                # record tag k -> node n; unparsable hex nodes map to ''
                try:
                    bin_n = bin(n)
                except TypeError:
                    bin_n = ''
                self.tagscache[k.strip()] = bin_n

            try:
                # read each head of the tags file, ending with the tip
                # and add each tag found to the map, with "newer" ones
                # taking precedence
                fl = self.file(".hgtags")
                h = fl.heads()
                h.reverse()
                for r in h:
                    for l in fl.read(r).splitlines():
                        if l:
                            n, k = l.split(" ", 1)
                            addtag(self, k, n)
            except KeyError:
                # no .hgtags file committed in this repository
                pass

            try:
                # repository-private tags, never exchanged over the wire
                f = self.opener("localtags")
                for l in f:
                    n, k = l.split(" ", 1)
                    addtag(self, k, n)
            except IOError:
                pass

            # 'tip' is always defined, overriding any user tag of that name
            self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
113 113
114 114 def tagslist(self):
115 115 '''return a list of tags ordered by revision'''
116 116 l = []
117 117 for t, n in self.tags().items():
118 118 try:
119 119 r = self.changelog.rev(n)
120 120 except:
121 121 r = -2 # sort to the beginning of the list if unknown
122 122 l.append((r,t,n))
123 123 l.sort()
124 124 return [(t,n) for r,t,n in l]
125 125
126 126 def nodetags(self, node):
127 127 '''return the tags associated with a node'''
128 128 if not self.nodetagscache:
129 129 self.nodetagscache = {}
130 130 for t,n in self.tags().items():
131 131 self.nodetagscache.setdefault(n,[]).append(t)
132 132 return self.nodetagscache.get(node, [])
133 133
134 134 def lookup(self, key):
135 135 try:
136 136 return self.tags()[key]
137 137 except KeyError:
138 138 try:
139 139 return self.changelog.lookup(key)
140 140 except:
141 141 raise repo.RepoError("unknown revision '%s'" % key)
142 142
143 143 def dev(self):
144 144 return os.stat(self.path).st_dev
145 145
146 146 def local(self):
147 147 return True
148 148
149 149 def join(self, f):
150 150 return os.path.join(self.path, f)
151 151
152 152 def wjoin(self, f):
153 153 return os.path.join(self.root, f)
154 154
155 155 def file(self, f):
156 156 if f[0] == '/': f = f[1:]
157 157 return filelog.filelog(self.opener, f)
158 158
159 159 def getcwd(self):
160 160 return self.dirstate.getcwd()
161 161
162 162 def wfile(self, f, mode='r'):
163 163 return self.wopener(f, mode)
164 164
    def wread(self, filename):
        """read a working-directory file, applying [encode] filters"""
        if self.encodepats == None:
            # build the (matcher, command) list once, lazily
            l = []
            for pat, cmd in self.ui.configitems("encode"):
                mf = util.matcher("", "/", [pat], [], [])[1]
                l.append((mf, cmd))
            self.encodepats = l

        data = self.wopener(filename, 'r').read()

        # only the first matching filter is applied
        for mf, cmd in self.encodepats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = util.filter(data, cmd)
                break

        return data
182 182
    def wwrite(self, filename, data, fd=None):
        """write data to a working-directory file, applying [decode]
        filters first; write to the already-open fd instead if given"""
        if self.decodepats == None:
            # build the (matcher, command) list once, lazily
            l = []
            for pat, cmd in self.ui.configitems("decode"):
                mf = util.matcher("", "/", [pat], [], [])[1]
                l.append((mf, cmd))
            self.decodepats = l

        # only the first matching filter is applied
        for mf, cmd in self.decodepats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = util.filter(data, cmd)
                break

        if fd:
            return fd.write(data)
        return self.wopener(filename, 'w').write(data)
200 200
    def transaction(self):
        """start a new journal-backed transaction; the dirstate is
        saved alongside so undo() can restore it"""
        # save dirstate for undo
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # fresh repository: no dirstate file yet
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        def after():
            # on successful close, keep the journal around as "undo"
            util.rename(self.join("journal"), self.join("undo"))
            util.rename(self.join("journal.dirstate"),
                        self.join("undo.dirstate"))

        return transaction.transaction(self.ui.warn, self.opener,
                                       self.join("journal"), after)
216 216
    def recover(self):
        """roll back an interrupted transaction, if its journal exists"""
        lock = self.lock()
        if os.path.exists(self.join("journal")):
            self.ui.status("rolling back interrupted transaction\n")
            return transaction.rollback(self.opener, self.join("journal"))
        else:
            self.ui.warn("no interrupted transaction available\n")
224 224
    def undo(self):
        """roll back the last completed transaction and restore the
        dirstate that was journalled with it"""
        lock = self.lock()
        if os.path.exists(self.join("undo")):
            self.ui.status("rolling back last transaction\n")
            transaction.rollback(self.opener, self.join("undo"))
            # drop the stale dirstate before swapping the file underneath it
            self.dirstate = None
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
        else:
            self.ui.warn("no undo information available\n")
235 235
    def lock(self, wait=1):
        """acquire the repository lock; with wait, block until the
        current holder releases it, otherwise re-raise LockHeld"""
        try:
            # try a non-blocking acquire first
            return lock.lock(self.join("lock"), 0)
        except lock.LockHeld, inst:
            if wait:
                self.ui.warn("waiting for lock held by %s\n" % inst.args[0])
                return lock.lock(self.join("lock"), wait)
            raise inst
244 244
    def rawcommit(self, files, text, user, date, p1=None, p2=None):
        """Commit the given files with explicit parents, bypassing the
        normal dirstate-driven commit path (used by import/debug code).

        The dirstate is only updated when p1 matches the current
        working-directory parent.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                fp1 = m1.get(f, nullid)
                fp2 = m2.get(f, nullid)

                # is the same revision on two branches of a merge?
                if fp2 == fp1:
                    fp2 = nullid

                if fp2 != nullid:
                    # is one parent an ancestor of the other?
                    fpa = r.ancestor(fp1, fp2)
                    if fpa == fp1:
                        fp1, fp2 = fp2, nullid
                    elif fpa == fp2:
                        fp2 = nullid

                # is the file unmodified from the parent?
                if t == r.read(fp1):
                    # record the proper existing parent in manifest
                    # no need to add a revision
                    mm[f] = fp1
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # file is missing from the working dir: treat as removed
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
314 314
    def commit(self, files = None, text = "", user = None, date = None,
               match = util.always, force=False):
        """Commit working-directory changes as a new changeset.

        files: explicit list to commit (default: everything changed)
        text:  commit message; the user is prompted when empty
        Returns the new changeset node, or None if nothing was
        committed or a hook/editor aborted.
        """
        commit = []
        remove = []
        changed = []

        if files:
            # commit only the named files, split into keeps vs removes
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn("%s not tracked!\n" % f)
        else:
            (c, a, d, u) = self.changes(match=match)
            commit = c + a
            remove = d

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        # an empty merge commit is still meaningful, hence the p2 check
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status("nothing changed\n")
            return None

        if not self.hook("precommit"):
            return None

        lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wread(f)
            except IOError:
                self.ui.warn("trouble committing %s!\n" % f)
                raise

            r = self.file(f)

            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                # record copy source in filelog metadata; null parents
                # force a new revision to be stored
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(" %s: copy %s:%s\n" % (f, cp, meta["copyrev"]))
                fp1, fp2 = nullid, nullid
            else:
                fp1 = m1.get(f, nullid)
                fp2 = m2.get(f, nullid)

            # is the same revision on two branches of a merge?
            if fp2 == fp1:
                fp2 = nullid

            if fp2 != nullid:
                # is one parent an ancestor of the other?
                fpa = r.ancestor(fp1, fp2)
                if fpa == fp1:
                    fp1, fp2 = fp2, nullid
                elif fpa == fp2:
                    fp2 = nullid

            # is the file unmodified from the parent?
            if not meta and t == r.read(fp1):
                # record the proper existing parent in manifest
                # no need to add a revision
                new[f] = fp1
                continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        if not text:
            # build the HG:-annotated template and run the user's editor
            edittext = ""
            if p2 != nullid:
                edittext += "HG: branch merge\n"
            edittext += "\n" + "HG: manifest hash %s\n" % hex(mn)
            edittext += "".join(["HG: changed %s\n" % f for f in changed])
            edittext += "".join(["HG: removed %s\n" % f for f in remove])
            if not changed and not remove:
                edittext += "HG: no files changed\n"
            edittext = self.ui.edit(edittext)
            if not edittext.rstrip():
                # empty message aborts the commit
                return None
            text = edittext

        user = user or self.ui.username()
        n = self.changelog.add(mn, changed, text, tr, p1, p2, user, date)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        if not self.hook("commit", node=hex(n)):
            return None
        return n
439 439
    def walk(self, node=None, files=[], match=util.always):
        """yield (source, filename) pairs for matching files: from the
        manifest of the given changeset node ('m'), or from the
        dirstate walk of the working directory otherwise"""
        if node:
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                if match(fn): yield 'm', fn
        else:
            for src, fn in self.dirstate.walk(files, match):
                yield src, fn
447 447
    def changes(self, node1 = None, node2 = None, files = [],
                match = util.always):
        """Compare two revisions, or a revision against the working dir.

        Returns (changed, added, deleted, unknown) file lists; the
        'unknown' list is only populated when the working directory
        is involved.
        """
        mf2, u = None, []

        def fcmp(fn, mf):
            # compare working-dir contents against the manifest revision
            t1 = self.wread(fn)
            t2 = self.file(fn).read(mf.get(fn, nullid))
            return cmp(t1, t2)

        def mfmatches(node):
            # manifest of the given node, restricted to matched files
            mf = dict(self.manifest.read(node))
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        # are we comparing the working directory?
        if not node2:
            l, c, a, d, u = self.dirstate.changes(files, match)

            # are we comparing working dir against its parent?
            if not node1:
                if l:
                    # do a full compare of any files that might have changed
                    change = self.changelog.read(self.dirstate.parents()[0])
                    mf2 = mfmatches(change[0])
                    for f in l:
                        if fcmp(f, mf2):
                            c.append(f)

                for l in c, a, d, u:
                    l.sort()

                return (c, a, d, u)

        # are we comparing working dir against non-tip?
        # generate a pseudo-manifest for the working dir
        if not node2:
            if not mf2:
                change = self.changelog.read(self.dirstate.parents()[0])
                mf2 = mfmatches(change[0])
            # "" marks files whose contents must be compared, not hashes
            for f in a + c + l:
                mf2[f] = ""
            for f in d:
                if f in mf2: del mf2[f]
        else:
            change = self.changelog.read(node2)
            mf2 = mfmatches(change[0])

        # flush lists from dirstate before comparing manifests
        c, a = [], []

        change = self.changelog.read(node1)
        mf1 = mfmatches(change[0])

        for fn in mf2:
            if mf1.has_key(fn):
                if mf1[fn] != mf2[fn]:
                    if mf2[fn] != "" or fcmp(fn, mf1):
                        c.append(fn)
                del mf1[fn]
            else:
                a.append(fn)

        # whatever survives in mf1 exists only on the node1 side
        d = mf1.keys()

        for l in c, a, d, u:
            l.sort()

        return (c, a, d, u)
518 518
519 519 def add(self, list):
520 520 for f in list:
521 521 p = self.wjoin(f)
522 522 if not os.path.exists(p):
523 523 self.ui.warn("%s does not exist!\n" % f)
524 524 elif not os.path.isfile(p):
525 525 self.ui.warn("%s not added: only files supported currently\n" % f)
526 526 elif self.dirstate.state(f) in 'an':
527 527 self.ui.warn("%s already tracked!\n" % f)
528 528 else:
529 529 self.dirstate.update([f], "a")
530 530
531 531 def forget(self, list):
532 532 for f in list:
533 533 if self.dirstate.state(f) not in 'ai':
534 534 self.ui.warn("%s not added!\n" % f)
535 535 else:
536 536 self.dirstate.forget([f])
537 537
538 538 def remove(self, list):
539 539 for f in list:
540 540 p = self.wjoin(f)
541 541 if os.path.exists(p):
542 542 self.ui.warn("%s still exists!\n" % f)
543 543 elif self.dirstate.state(f) == 'a':
544 544 self.ui.warn("%s never committed!\n" % f)
545 545 self.dirstate.forget([f])
546 546 elif f not in self.dirstate:
547 547 self.ui.warn("%s not tracked!\n" % f)
548 548 else:
549 549 self.dirstate.update([f], "r")
550 550
551 551 def copy(self, source, dest):
552 552 p = self.wjoin(dest)
553 553 if not os.path.exists(p):
554 554 self.ui.warn("%s does not exist!\n" % dest)
555 555 elif not os.path.isfile(p):
556 556 self.ui.warn("copy failed: %s is not a file\n" % dest)
557 557 else:
558 558 if self.dirstate.state(dest) == '?':
559 559 self.dirstate.update([dest], "a")
560 560 self.dirstate.copy(source, dest)
561 561
562 562 def heads(self):
563 563 return self.changelog.heads()
564 564
565 565 # branchlookup returns a dict giving a list of branches for
566 566 # each head. A branch is defined as the tag of a node or
567 567 # the branch of the node's parents. If a node has multiple
568 568 # branch tags, tags are eliminated if they are visible from other
569 569 # branch tags.
570 570 #
571 571 # So, for this graph: a->b->c->d->e
572 572 # \ /
573 573 # aa -----/
574 574 # a has tag 2.6.12
575 575 # d has tag 2.6.13
576 576 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
577 577 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
578 578 # from the list.
579 579 #
580 580 # It is possible that more than one head will have the same branch tag.
581 581 # callers need to check the result for multiple heads under the same
582 582 # branch tag if that is a problem for them (ie checkout of a specific
583 583 # branch).
584 584 #
585 585 # passing in a specific branch will limit the depth of the search
586 586 # through the parents. It won't limit the branches returned in the
587 587 # result though.
    def branchlookup(self, heads=None, branch=None):
        """return a dict mapping each head to its list of branch tags
        (see the algorithm description in the comment block above)"""
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume a second-parent branch, keeping the tags
                # already found on the way down
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        # search depth limited to the requested branch
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # set of tag nodes reachable from node, memoized
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
670 670
    def branches(self, nodes):
        """for each node, walk first parents back to the nearest merge
        or root and return (start, end, p1, p2) describing that linear
        segment of history"""
        if not nodes: nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            # follow first parents until a merge or a root is reached
            while n:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
683 683
    def between(self, pairs):
        """for each (top, bottom) pair, return the nodes found at
        exponentially-spaced steps while walking first parents from
        top down to bottom (used by findincoming's binary search)"""
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                # sample at steps 1, 2, 4, 8, ...
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
702 702
    def findincoming(self, remote, base=None, heads=None):
        """Find changesets the remote has that we lack.

        If a dict is passed as 'base' it is filled in with nodes common
        to both sides; 'heads' limits which remote heads are examined.
        Returns the list of earliest-unknown nodes to fetch, or None
        when nothing is missing.
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status("searching for changes\n")

        if not heads:
            heads = remote.heads()

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            # every remote head is already known locally
            return None

        rep = {}
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug("examining %s:%s\n" % (short(n[0]), short(n[1])))
                if n[0] == nullid:
                    break
                if n in seenbranch:
                    self.ui.debug("branch already found\n")
                    continue
                if n[1] and n[1] in m: # do we know the base?
                    self.ui.debug("found incomplete branch %s:%s\n"
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug("found new changeset %s\n" %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            base[n[2]] = 1 # latest known
                            continue

                    # queue both parents for the next batched request
                    for a in n[2:4]:
                        if a not in rep:
                            r.append(a)
                            rep[a] = 1

                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug("request %d: %s\n" %
                            (reqcnt, " ".join(map(short, r))))
                # batch branch queries ten at a time
                for p in range(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug("received %s:%s\n" %
                                      (short(b[0]), short(b[1])))
                        if b[0] in m:
                            self.ui.debug("found base node %s\n" % short(b[0]))
                            base[b[0]] = 1
                        elif b[0] not in seen:
                            unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug("found new branch changeset %s\n" %
                                          short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug("narrowed branch search to %s:%s\n"
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError("already have changeset " + short(f[:4]))

        if base.keys() == [nullid]:
            self.ui.warn("warning: pulling from an unrelated repository!\n")

        self.ui.note("found new changesets starting at " +
                     " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug("%d total queries\n" % reqcnt)

        return fetch.keys()
822 822
    def findoutgoing(self, remote, base=None, heads=None):
        """return the root nodes of the set of changesets we have that
        the remote lacks; 'base' may be a pre-computed common-node dict
        from a prior findincoming call"""
        if base == None:
            base = {}
            self.findincoming(remote, base, heads)

        self.ui.debug("common changesets up to "
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)

        # this is the set of all roots we have to push
        return subset
852 852
    def pull(self, remote, heads = None):
        """pull missing changesets from remote, optionally limited to
        ancestors of the given heads; returns 1 when nothing to do"""
        lock = self.lock()

        # if we have an empty repo, fetch everything
        if self.changelog.tip() == nullid:
            self.ui.status("requesting all changes\n")
            fetch = [nullid]
        else:
            fetch = self.findincoming(remote)

        if not fetch:
            self.ui.status("no changes found\n")
            return 1

        if heads is None:
            cg = remote.changegroup(fetch)
        else:
            # partial pull: only the subset reaching the requested heads
            cg = remote.changegroupsubset(fetch, heads)
        return self.addchangegroup(cg)
872 872
    def push(self, remote, force=False):
        """push local changesets the remote lacks; unless force is set,
        refuse when the remote has unseen changes or the push would
        create new remote heads"""
        lock = remote.lock()

        base = {}
        heads = remote.heads()
        inc = self.findincoming(remote, base, heads)
        if not force and inc:
            self.ui.warn("abort: unsynced remote changes!\n")
            self.ui.status("(did you forget to sync? use push -f to force)\n")
            return 1

        update = self.findoutgoing(remote, base)
        if not update:
            self.ui.status("no changes found\n")
            return 1
        elif not force:
            # fewer remote heads than local heads means this push would
            # add new branches on the remote side
            if len(heads) < len(self.changelog.heads()):
                self.ui.warn("abort: push creates new remote branches!\n")
                self.ui.status("(did you forget to merge?" +
                               " use push -f to force)\n")
                return 1

        cg = self.changegroup(update)
        return remote.addchangegroup(cg)
897 897
898 898 def changegroupsubset(self, bases, heads):
899 899 cl = self.changelog
900 900 # msng = missing
901 901 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
902 902 junk = None
903 903 knownheads = {}
904 904 for n in bases:
905 905 for p in cl.parents(n):
906 906 if p != nullid:
907 907 knownheads[p] = 1
908 908 knownheads = knownheads.keys()
909 909 if knownheads:
910 910 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
911 911 has_cl_set = dict.fromkeys(hasnodeset)
912 912 else:
913 913 has_cl_set = {}
914 914
915 915 mnfst = self.manifest
916 916 msng_mnfst_set = {}
917 917 msng_filenode_set = {}
918 918
919 919 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
920 920 junk = None
921 921
922 922 def identity(x):
923 923 return x
924 924
925 925 def cmp_by_rev_func(revlog):
926 926 def cmpfunc(a, b):
927 927 return cmp(revlog.rev(a), revlog.rev(b))
928 928 return cmpfunc
929 929
930 930 def prune_parents(revlog, hasset, msngset):
931 931 haslst = hasset.keys()
932 932 haslst.sort(cmp_by_rev_func(revlog))
933 933 for node in haslst:
934 934 parentlst = [p for p in revlog.parents(node) if p != nullid]
935 935 while parentlst:
936 936 n = parentlst.pop()
937 937 if n not in hasset:
938 938 hasset[n] = 1
939 939 p = [p for p in revlog.parents(n) if p != nullid]
940 940 parentlst.extend(p)
941 941 for n in hasset:
942 942 msngset.pop(n, None)
943 943
944 944 def manifest_and_file_collector(changedfileset):
945 945 def collect_manifests_and_files(clnode):
946 946 c = cl.read(clnode)
947 947 for f in c[3]:
948 948 # This is to make sure we only have one instance of each
949 949 # filename string for each filename.
950 950 changedfileset.setdefault(f, f)
951 951 msng_mnfst_set.setdefault(c[0], clnode)
952 952 return collect_manifests_and_files
953 953
954 954 def prune_manifests():
955 955 has_mnfst_set = {}
956 956 for n in msng_mnfst_set:
957 957 linknode = cl.node(mnfst.linkrev(n))
958 958 if linknode in has_cl_set:
959 959 has_mnfst_set[n] = 1
960 960 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
961 961
962 962 def lookup_manifest_link(mnfstnode):
963 963 return msng_mnfst_set[mnfstnode]
964 964
965 965 def filenode_collector(changedfiles):
966 next_rev = [0]
966 967 def collect_msng_filenodes(mnfstnode):
967 m = mnfst.read(mnfstnode)
968 for f in changedfiles:
969 fnode = m.get(f, None)
970 if fnode is not None:
971 clnode = msng_mnfst_set[mnfstnode]
972 ndset = msng_filenode_set.setdefault(f, {})
973 ndset.setdefault(fnode, clnode)
968 r = mnfst.rev(mnfstnode)
969 if r == next_rev[0]:
970 # If the last rev we looked at was the one just previous,
971 # we only need to see a diff.
972 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
973 for dline in delta.splitlines():
974 f, fnode = dline.split('\0')
975 fnode = bin(fnode[:40])
976 f = changedfiles.get(f, None)
977 if f is not None:
978 clnode = msng_mnfst_set[mnfstnode]
979 ndset = msng_filenode_set.setdefault(f, {})
980 ndset.setdefault(fnode, clnode)
981 else:
982 m = mnfst.read(mnfstnode)
983 for f in changedfiles:
984 fnode = m.get(f, None)
985 if fnode is not None:
986 clnode = msng_mnfst_set[mnfstnode]
987 ndset = msng_filenode_set.setdefault(f, {})
988 ndset.setdefault(fnode, clnode)
989 next_rev[0] = r + 1
974 990 return collect_msng_filenodes
975 991
976 992 def prune_filenodes(f, filerevlog):
977 993 msngset = msng_filenode_set[f]
978 994 hasset = {}
979 995 for n in msngset:
980 996 clnode = cl.node(filerevlog.linkrev(n))
981 997 if clnode in has_cl_set:
982 998 hasset[n] = 1
983 999 prune_parents(filerevlog, hasset, msngset)
984 1000
985 1001 def lookup_filenode_link_func(fname):
986 1002 msngset = msng_filenode_set[fname]
987 1003 def lookup_filenode_link(fnode):
988 1004 return msngset[fnode]
989 1005 return lookup_filenode_link
990 1006
        def gengroup():
            # Stream the changegroup: changelog chunks first, then manifest
            # chunks, then one length-prefixed section per changed file,
            # terminated by a zero-length chunk.
            changedfiles = {}
            # Emit the changeset group; as a side effect the collector
            # records every file touched by these changesets.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Manifest nodes must be sent in revision order.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # The collector receives the changedfiles *dict* so it can do
            # membership lookups while gathering missing filenodes.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk
            # The manifest bookkeeping is no longer needed; free it.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()
            changedfiles = changedfiles.keys()
            changedfiles.sort()
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Drop filenodes the recipient already has.
                prune_filenodes(fname, filerevlog)
                msng_filenode_lst = msng_filenode_set[fname].keys()
                if len(msng_filenode_lst) > 0:
                    # File section header: big-endian length + name.
                    yield struct.pack(">l", len(fname) + 4) + fname
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                del msng_filenode_set[fname]
            # Zero-length chunk marks the end of the stream.
            yield struct.pack(">l", 0)
1021 1037
1022 1038 return util.chunkbuffer(gengroup())
1023 1039
    def changegroup(self, basenodes):
        """Build a changegroup containing all changesets not reachable
        from basenodes.

        Returns a util.chunkbuffer wrapping a generator that yields the
        group as raw chunks: changelog data, then manifest data, then a
        length-prefixed section per changed file, ending with a
        zero-length chunk."""
        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        # Set of changelog revision numbers being transmitted.
        revset = dict.fromkeys([cl.rev(n) for n in nodes])

        def identity(x):
            # Changelog nodes are their own link nodes.
            return x

        def gennodelst(revlog):
            # Yield, in revision order, the nodes of this revlog whose
            # linked changeset is part of the outgoing set.
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # Side-effect callback: record every file named by each
            # transmitted changeset.
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # Map a node of 'revlog' to its linked changelog node.
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                # Materialize so an empty iterator can be skipped without
                # emitting a header for it.
                nodeiter = list(nodeiter)
                if nodeiter:
                    # File section header: big-endian length + name.
                    yield struct.pack(">l", len(fname) + 4) + fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # Zero-length chunk terminates the stream.
            yield struct.pack(">l", 0)

        return util.chunkbuffer(gengroup())
1078 1094
1079 1095 def addchangegroup(self, source):
1080 1096
1081 1097 def getchunk():
1082 1098 d = source.read(4)
1083 1099 if not d: return ""
1084 1100 l = struct.unpack(">l", d)[0]
1085 1101 if l <= 4: return ""
1086 1102 d = source.read(l - 4)
1087 1103 if len(d) < l - 4:
1088 1104 raise repo.RepoError("premature EOF reading chunk" +
1089 1105 " (got %d bytes, expected %d)"
1090 1106 % (len(d), l - 4))
1091 1107 return d
1092 1108
1093 1109 def getgroup():
1094 1110 while 1:
1095 1111 c = getchunk()
1096 1112 if not c: break
1097 1113 yield c
1098 1114
1099 1115 def csmap(x):
1100 1116 self.ui.debug("add changeset %s\n" % short(x))
1101 1117 return self.changelog.count()
1102 1118
1103 1119 def revmap(x):
1104 1120 return self.changelog.rev(x)
1105 1121
1106 1122 if not source: return
1107 1123 changesets = files = revisions = 0
1108 1124
1109 1125 tr = self.transaction()
1110 1126
1111 1127 oldheads = len(self.changelog.heads())
1112 1128
1113 1129 # pull off the changeset group
1114 1130 self.ui.status("adding changesets\n")
1115 1131 co = self.changelog.tip()
1116 1132 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
1117 1133 cnr, cor = map(self.changelog.rev, (cn, co))
1118 1134 if cn == nullid:
1119 1135 cnr = cor
1120 1136 changesets = cnr - cor
1121 1137
1122 1138 # pull off the manifest group
1123 1139 self.ui.status("adding manifests\n")
1124 1140 mm = self.manifest.tip()
1125 1141 mo = self.manifest.addgroup(getgroup(), revmap, tr)
1126 1142
1127 1143 # process the files
1128 1144 self.ui.status("adding file changes\n")
1129 1145 while 1:
1130 1146 f = getchunk()
1131 1147 if not f: break
1132 1148 self.ui.debug("adding %s revisions\n" % f)
1133 1149 fl = self.file(f)
1134 1150 o = fl.count()
1135 1151 n = fl.addgroup(getgroup(), revmap, tr)
1136 1152 revisions += fl.count() - o
1137 1153 files += 1
1138 1154
1139 1155 newheads = len(self.changelog.heads())
1140 1156 heads = ""
1141 1157 if oldheads and newheads > oldheads:
1142 1158 heads = " (+%d heads)" % (newheads - oldheads)
1143 1159
1144 1160 self.ui.status(("added %d changesets" +
1145 1161 " with %d changes to %d files%s\n")
1146 1162 % (changesets, revisions, files, heads))
1147 1163
1148 1164 tr.close()
1149 1165
1150 1166 if changesets > 0:
1151 1167 if not self.hook("changegroup",
1152 1168 node=hex(self.changelog.node(cor+1))):
1153 1169 self.ui.warn("abort: changegroup hook returned failure!\n")
1154 1170 return 1
1155 1171
1156 1172 for i in range(cor + 1, cnr + 1):
1157 1173 self.hook("commit", node=hex(self.changelog.node(i)))
1158 1174
1159 1175 return
1160 1176
    def update(self, node, allow=False, force=False, choose=None,
               moddirstate=True):
        """Update the working directory to changeset 'node'.

        allow  -- permit an update that spans branches (a merge)
        force  -- override local changes / outstanding merges
        choose -- optional predicate restricting which files are touched
        moddirstate -- if False, leave the dirstate untouched

        Returns 1 on refusal (uncommitted merge outstanding, or a
        branch-spanning update without 'allow'); None otherwise.
        """
        pl = self.dirstate.parents()
        if not force and pl[1] != nullid:
            self.ui.warn("aborting: outstanding uncommitted merges\n")
            return 1

        p1, p2 = pl[0], node
        pa = self.changelog.ancestor(p1, p2)
        m1n = self.changelog.read(p1)[0]
        m2n = self.changelog.read(p2)[0]
        man = self.manifest.ancestor(m1n, m2n)
        # m*: manifests, mf*: executable-flag maps, for local (1),
        # remote (2) and common ancestor (a).
        m1 = self.manifest.read(m1n)
        mf1 = self.manifest.readflags(m1n)
        m2 = self.manifest.read(m2n)
        mf2 = self.manifest.readflags(m2n)
        ma = self.manifest.read(man)
        mfa = self.manifest.readflags(man)

        # presumably (changed, added, deleted, unknown) working-dir
        # files — TODO confirm against self.changes()
        (c, a, d, u) = self.changes()

        # is this a jump, or a merge? i.e. is there a linear path
        # from p1 to p2?
        linear_path = (pa == p1 or pa == p2)

        # resolve the manifest to determine which files
        # we care about merging
        self.ui.note("resolving manifests\n")
        self.ui.debug(" force %s allow %s moddirstate %s linear %s\n" %
                      (force, allow, moddirstate, linear_path))
        self.ui.debug(" ancestor %s local %s remote %s\n" %
                      (short(man), short(m1n), short(m2n)))

        merge = {}   # files needing a 3-way merge: f -> (my, other, mode)
        get = {}     # files to fetch from the target: f -> filenode
        remove = []  # files to delete from the working dir

        # construct a working dir manifest
        mw = m1.copy()
        mfw = mf1.copy()
        umap = dict.fromkeys(u)

        for f in a + c + u:
            mw[f] = ""
            mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

        for f in d:
            if f in mw: del mw[f]

            # If we're jumping between revisions (as opposed to merging),
            # and if neither the working directory nor the target rev has
            # the file, then we need to remove it from the dirstate, to
            # prevent the dirstate from listing the file when it is no
            # longer in the manifest.
            if moddirstate and linear_path and f not in m2:
                self.dirstate.forget((f,))

        # Compare manifests
        for f, n in mw.iteritems():
            if choose and not choose(f): continue
            if f in m2:
                s = 0

                # is the wfile new since m1, and match m2?
                if f not in m1:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) == 0:
                        n = m2[f]
                    del t1, t2

                # are files different?
                if n != m2[f]:
                    a = ma.get(f, nullid)
                    # are both different from the ancestor?
                    if n != a and m2[f] != a:
                        self.ui.debug(" %s versions differ, resolve\n" % f)
                        # merge executable bits
                        # "if we changed or they changed, change in merge"
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        merge[f] = (m1.get(f, nullid), m2[f], mode)
                        s = 1
                    # are we clobbering?
                    # is remote's version newer?
                    # or are we going back in time?
                    elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                        self.ui.debug(" remote %s is newer, get\n" % f)
                        get[f] = m2[f]
                        s = 1
                elif f in umap:
                    # this unknown file is the same as the checkout
                    get[f] = m2[f]

                if not s and mfw[f] != mf2[f]:
                    if force:
                        self.ui.debug(" updating permissions for %s\n" % f)
                        util.set_exec(self.wjoin(f), mf2[f])
                    else:
                        # same 3-way rule as above, for the exec bit only
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        if mode != b:
                            self.ui.debug(" updating permissions for %s\n" % f)
                            util.set_exec(self.wjoin(f), mode)
                # handled; anything left in m2 afterwards is remote-only
                del m2[f]
            elif f in ma:
                if n != ma[f]:
                    r = "d"
                    if not force and (linear_path or allow):
                        r = self.ui.prompt(
                            (" local changed %s which remote deleted\n" % f) +
                            "(k)eep or (d)elete?", "[kd]", "k")
                    if r == "d":
                        remove.append(f)
                else:
                    self.ui.debug("other deleted %s\n" % f)
                    remove.append(f) # other deleted it
            else:
                # file is created on branch or in working directory
                if force and f not in umap:
                    self.ui.debug("remote deleted %s, clobbering\n" % f)
                    remove.append(f)
                elif n == m1.get(f, nullid): # same as parent
                    if p2 == pa: # going backwards?
                        self.ui.debug("remote deleted %s\n" % f)
                        remove.append(f)
                    else:
                        self.ui.debug("local modified %s, keeping\n" % f)
                else:
                    self.ui.debug("working dir created %s, keeping\n" % f)

        # remaining m2 entries exist only on the remote side
        for f, n in m2.iteritems():
            if choose and not choose(f): continue
            if f[0] == "/": continue
            if f in ma and n != ma[f]:
                r = "k"
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        ("remote changed %s which local deleted\n" % f) +
                        "(k)eep or (d)elete?", "[kd]", "k")
                if r == "k": get[f] = n
            elif f not in ma:
                self.ui.debug("remote created %s\n" % f)
                get[f] = n
            else:
                if force or p2 == pa: # going backwards?
                    self.ui.debug("local deleted %s, recreating\n" % f)
                    get[f] = n
                else:
                    self.ui.debug("local deleted %s\n" % f)

        del mw, m1, m2, ma

        if force:
            # forced update: take the remote version of everything
            # that would otherwise need a merge
            for f in merge:
                get[f] = merge[f][1]
            merge = {}

        if linear_path or force:
            # we don't need to do any magic, just jump to the new rev
            branch_merge = False
            p1, p2 = p2, nullid
        else:
            if not allow:
                self.ui.status("this update spans a branch" +
                               " affecting the following files:\n")
                fl = merge.keys() + get.keys()
                fl.sort()
                for f in fl:
                    cf = ""
                    if f in merge: cf = " (resolve)"
                    self.ui.status(" %s%s\n" % (f, cf))
                self.ui.warn("aborting update spanning branches!\n")
                self.ui.status("(use update -m to merge across branches" +
                               " or -C to lose changes)\n")
                return 1
            branch_merge = True

        if moddirstate:
            self.dirstate.setparents(p1, p2)

        # get the files we don't need to change
        files = get.keys()
        files.sort()
        for f in files:
            if f[0] == "/": continue
            self.ui.note("getting %s\n" % f)
            t = self.file(f).read(get[f])
            try:
                self.wwrite(f, t)
            except IOError, e:
                if e.errno != errno.ENOENT:
                    raise
                # parent directory is missing: create it and retry
                os.makedirs(os.path.dirname(self.wjoin(f)))
                self.wwrite(f, t)
            util.set_exec(self.wjoin(f), mf2[f])
            if moddirstate:
                if branch_merge:
                    self.dirstate.update([f], 'n', st_mtime=-1)
                else:
                    self.dirstate.update([f], 'n')

        # merge the tricky bits
        files = merge.keys()
        files.sort()
        for f in files:
            self.ui.status("merging %s\n" % f)
            my, other, flag = merge[f]
            self.merge3(f, my, other)
            util.set_exec(self.wjoin(f), flag)
            if moddirstate:
                if branch_merge:
                    # We've done a branch merge, mark this file as merged
                    # so that we properly record the merger later
                    self.dirstate.update([f], 'm')
                else:
                    # We've update-merged a locally modified file, so
                    # we set the dirstate to emulate a normal checkout
                    # of that file some time in the past. Thus our
                    # merge will appear as a normal local file
                    # modification.
                    f_len = len(self.file(f).read(other))
                    self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)

        remove.sort()
        for f in remove:
            self.ui.note("removing %s\n" % f)
            try:
                os.unlink(self.wjoin(f))
            except OSError, inst:
                self.ui.warn("update failed to remove %s: %s!\n" % (f, inst))
            # try removing directories that might now be empty
            try: os.removedirs(os.path.dirname(self.wjoin(f)))
            except: pass
        if moddirstate:
            if branch_merge:
                self.dirstate.update(remove, 'r')
            else:
                self.dirstate.forget(remove)
1400 1416
    def merge3(self, fn, my, other):
        """perform a 3-way merge in the working directory

        fn    -- repository path of the file to merge
        my    -- filenode of the local version
        other -- filenode of the other version

        The common ancestor is computed from the filelog.  The external
        merge tool ($HGMERGE, [ui] merge, or "hgmerge") is run on the
        working file plus temp copies of base and other; failure is only
        warned about, not raised.
        """

        def temp(prefix, node):
            # Write revision 'node' of fn to a named temp file and
            # return its path; caller is responsible for unlinking.
            pre = "%s~%s." % (os.path.basename(fn), prefix)
            (fd, name) = tempfile.mkstemp("", pre)
            f = os.fdopen(fd, "wb")
            self.wwrite(fn, fl.read(node), f)
            f.close()
            return name

        fl = self.file(fn)
        base = fl.ancestor(my, other)
        a = self.wjoin(fn)
        b = temp("base", base)
        c = temp("other", other)

        self.ui.note("resolving %s\n" % fn)
        self.ui.debug("file %s: my %s other %s ancestor %s\n" %
                      (fn, short(my), short(other), short(base)))

        # NOTE(review): the command and paths are interpolated into a
        # shell command line unquoted; filenames containing shell
        # metacharacters will be misinterpreted by the shell.
        cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
               or "hgmerge")
        r = os.system("%s %s %s %s" % (cmd, a, b, c))
        if r:
            self.ui.warn("merging %s failed!\n" % fn)

        os.unlink(b)
        os.unlink(c)
1430 1446
1431 1447 def verify(self):
1432 1448 filelinkrevs = {}
1433 1449 filenodes = {}
1434 1450 changesets = revisions = files = 0
1435 1451 errors = [0]
1436 1452 neededmanifests = {}
1437 1453
1438 1454 def err(msg):
1439 1455 self.ui.warn(msg + "\n")
1440 1456 errors[0] += 1
1441 1457
1442 1458 seen = {}
1443 1459 self.ui.status("checking changesets\n")
1444 1460 for i in range(self.changelog.count()):
1445 1461 changesets += 1
1446 1462 n = self.changelog.node(i)
1447 1463 l = self.changelog.linkrev(n)
1448 1464 if l != i:
1449 1465 err("incorrect link (%d) for changeset revision %d" % (l, i))
1450 1466 if n in seen:
1451 1467 err("duplicate changeset at revision %d" % i)
1452 1468 seen[n] = 1
1453 1469
1454 1470 for p in self.changelog.parents(n):
1455 1471 if p not in self.changelog.nodemap:
1456 1472 err("changeset %s has unknown parent %s" %
1457 1473 (short(n), short(p)))
1458 1474 try:
1459 1475 changes = self.changelog.read(n)
1460 1476 except Exception, inst:
1461 1477 err("unpacking changeset %s: %s" % (short(n), inst))
1462 1478
1463 1479 neededmanifests[changes[0]] = n
1464 1480
1465 1481 for f in changes[3]:
1466 1482 filelinkrevs.setdefault(f, []).append(i)
1467 1483
1468 1484 seen = {}
1469 1485 self.ui.status("checking manifests\n")
1470 1486 for i in range(self.manifest.count()):
1471 1487 n = self.manifest.node(i)
1472 1488 l = self.manifest.linkrev(n)
1473 1489
1474 1490 if l < 0 or l >= self.changelog.count():
1475 1491 err("bad manifest link (%d) at revision %d" % (l, i))
1476 1492
1477 1493 if n in neededmanifests:
1478 1494 del neededmanifests[n]
1479 1495
1480 1496 if n in seen:
1481 1497 err("duplicate manifest at revision %d" % i)
1482 1498
1483 1499 seen[n] = 1
1484 1500
1485 1501 for p in self.manifest.parents(n):
1486 1502 if p not in self.manifest.nodemap:
1487 1503 err("manifest %s has unknown parent %s" %
1488 1504 (short(n), short(p)))
1489 1505
1490 1506 try:
1491 1507 delta = mdiff.patchtext(self.manifest.delta(n))
1492 1508 except KeyboardInterrupt:
1493 1509 self.ui.warn("interrupted")
1494 1510 raise
1495 1511 except Exception, inst:
1496 1512 err("unpacking manifest %s: %s" % (short(n), inst))
1497 1513
1498 1514 ff = [ l.split('\0') for l in delta.splitlines() ]
1499 1515 for f, fn in ff:
1500 1516 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1501 1517
1502 1518 self.ui.status("crosschecking files in changesets and manifests\n")
1503 1519
1504 1520 for m,c in neededmanifests.items():
1505 1521 err("Changeset %s refers to unknown manifest %s" %
1506 1522 (short(m), short(c)))
1507 1523 del neededmanifests
1508 1524
1509 1525 for f in filenodes:
1510 1526 if f not in filelinkrevs:
1511 1527 err("file %s in manifest but not in changesets" % f)
1512 1528
1513 1529 for f in filelinkrevs:
1514 1530 if f not in filenodes:
1515 1531 err("file %s in changeset but not in manifest" % f)
1516 1532
1517 1533 self.ui.status("checking files\n")
1518 1534 ff = filenodes.keys()
1519 1535 ff.sort()
1520 1536 for f in ff:
1521 1537 if f == "/dev/null": continue
1522 1538 files += 1
1523 1539 fl = self.file(f)
1524 1540 nodes = { nullid: 1 }
1525 1541 seen = {}
1526 1542 for i in range(fl.count()):
1527 1543 revisions += 1
1528 1544 n = fl.node(i)
1529 1545
1530 1546 if n in seen:
1531 1547 err("%s: duplicate revision %d" % (f, i))
1532 1548 if n not in filenodes[f]:
1533 1549 err("%s: %d:%s not in manifests" % (f, i, short(n)))
1534 1550 else:
1535 1551 del filenodes[f][n]
1536 1552
1537 1553 flr = fl.linkrev(n)
1538 1554 if flr not in filelinkrevs[f]:
1539 1555 err("%s:%s points to unexpected changeset %d"
1540 1556 % (f, short(n), flr))
1541 1557 else:
1542 1558 filelinkrevs[f].remove(flr)
1543 1559
1544 1560 # verify contents
1545 1561 try:
1546 1562 t = fl.read(n)
1547 1563 except Exception, inst:
1548 1564 err("unpacking file %s %s: %s" % (f, short(n), inst))
1549 1565
1550 1566 # verify parents
1551 1567 (p1, p2) = fl.parents(n)
1552 1568 if p1 not in nodes:
1553 1569 err("file %s:%s unknown parent 1 %s" %
1554 1570 (f, short(n), short(p1)))
1555 1571 if p2 not in nodes:
1556 1572 err("file %s:%s unknown parent 2 %s" %
1557 1573 (f, short(n), short(p1)))
1558 1574 nodes[n] = 1
1559 1575
1560 1576 # cross-check
1561 1577 for node in filenodes[f]:
1562 1578 err("node %s in manifests not in %s" % (hex(node), f))
1563 1579
1564 1580 self.ui.status("%d files, %d changesets, %d total revisions\n" %
1565 1581 (files, changesets, revisions))
1566 1582
1567 1583 if errors[0]:
1568 1584 self.ui.warn("%d integrity errors encountered!\n" % errors[0])
1569 1585 return 1
General Comments 0
You need to be logged in to leave comments. Login now