##// END OF EJS Templates
localrepo: refactor the locking functions
Benoit Boissinot -
r1751:e9bf415a default
parent child Browse files
Show More
@@ -1,1853 +1,1853 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import struct, os, util
9 9 import filelog, manifest, changelog, dirstate, repo
10 10 from node import *
11 11 from i18n import gettext as _
12 12 from demandload import *
13 13 demandload(globals(), "re lock transaction tempfile stat mdiff errno")
14 14
class localrepository(object):
    def __init__(self, ui, path=None, create=0):
        """Open (or create) the repository at path.

        If path is None, walk upward from the current directory until a
        ".hg" directory is found.  Raises repo.RepoError if no repository
        exists and create is false.
        """
        if not path:
            # search upwards for a directory containing ".hg"
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    # reached the filesystem root without finding a repo
                    raise repo.RepoError(_("no repo found"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise repo.RepoError(_("repository %s not found") % path)

        self.root = os.path.abspath(path)
        self.ui = ui
        # opener reads/writes files under .hg; wopener under the working dir
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.manifest = manifest.manifest(self.opener)
        self.changelog = changelog.changelog(self.opener)
        # lazily populated caches -- see tags(), nodetags(), wread(), wwrite()
        self.tagscache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.dirstate = dirstate.dirstate(self.opener, ui, self.root)
        try:
            # per-repository configuration overrides
            self.ui.readconfig(self.join("hgrc"))
        except IOError:
            # a repository without an hgrc is perfectly valid
            pass
50 50
51 51 def hook(self, name, throw=False, **args):
52 52 def runhook(name, cmd):
53 53 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
54 54 old = {}
55 55 for k, v in args.items():
56 56 k = k.upper()
57 57 old['HG_' + k] = os.environ.get(k, None)
58 58 old[k] = os.environ.get(k, None)
59 59 os.environ['HG_' + k] = str(v)
60 60 os.environ[k] = str(v)
61 61
62 62 try:
63 63 # Hooks run in the repository root
64 64 olddir = os.getcwd()
65 65 os.chdir(self.root)
66 66 r = os.system(cmd)
67 67 finally:
68 68 for k, v in old.items():
69 69 if v is not None:
70 70 os.environ[k] = v
71 71 else:
72 72 del os.environ[k]
73 73
74 74 os.chdir(olddir)
75 75
76 76 if r:
77 77 desc, r = util.explain_exit(r)
78 78 if throw:
79 79 raise util.Abort(_('%s hook %s') % (name, desc))
80 80 self.ui.warn(_('error: %s hook %s\n') % (name, desc))
81 81 return False
82 82 return True
83 83
84 84 r = True
85 85 for hname, cmd in self.ui.configitems("hooks"):
86 86 s = hname.split(".")
87 87 if s[0] == name and cmd:
88 88 r = runhook(hname, cmd) and r
89 89 return r
90 90
    def tags(self):
        '''return a mapping of tag to node'''
        if not self.tagscache:
            self.tagscache = {}
            def addtag(self, k, n):
                # tag values are hex nodes; store '' for unparsable ones
                try:
                    bin_n = bin(n)
                except TypeError:
                    bin_n = ''
                self.tagscache[k.strip()] = bin_n

            try:
                # read each head of the tags file, ending with the tip
                # and add each tag found to the map, with "newer" ones
                # taking precedence
                fl = self.file(".hgtags")
                h = fl.heads()
                h.reverse()
                for r in h:
                    for l in fl.read(r).splitlines():
                        if l:
                            n, k = l.split(" ", 1)
                            addtag(self, k, n)
            except KeyError:
                # no .hgtags file exists in the repository
                pass

            try:
                # purely local tags live in .hg/localtags and override
                # anything read from .hgtags
                f = self.opener("localtags")
                for l in f:
                    n, k = l.split(" ", 1)
                    addtag(self, k, n)
            except IOError:
                pass

            # the 'tip' pseudo-tag always exists and always wins
            self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
128 128
129 129 def tagslist(self):
130 130 '''return a list of tags ordered by revision'''
131 131 l = []
132 132 for t, n in self.tags().items():
133 133 try:
134 134 r = self.changelog.rev(n)
135 135 except:
136 136 r = -2 # sort to the beginning of the list if unknown
137 137 l.append((r, t, n))
138 138 l.sort()
139 139 return [(t, n) for r, t, n in l]
140 140
141 141 def nodetags(self, node):
142 142 '''return the tags associated with a node'''
143 143 if not self.nodetagscache:
144 144 self.nodetagscache = {}
145 145 for t, n in self.tags().items():
146 146 self.nodetagscache.setdefault(n, []).append(t)
147 147 return self.nodetagscache.get(node, [])
148 148
    def lookup(self, key):
        """Map a tag name or changelog identifier to a binary node.

        Tags take precedence over changelog lookup.  Raises
        repo.RepoError if the key matches nothing.
        """
        try:
            return self.tags()[key]
        except KeyError:
            try:
                return self.changelog.lookup(key)
            except:
                # changelog.lookup can fail in several ways; report them
                # all uniformly as an unknown revision
                raise repo.RepoError(_("unknown revision '%s'") % key)
157 157
158 158 def dev(self):
159 159 return os.stat(self.path).st_dev
160 160
161 161 def local(self):
162 162 return True
163 163
164 164 def join(self, f):
165 165 return os.path.join(self.path, f)
166 166
167 167 def wjoin(self, f):
168 168 return os.path.join(self.root, f)
169 169
170 170 def file(self, f):
171 171 if f[0] == '/':
172 172 f = f[1:]
173 173 return filelog.filelog(self.opener, f)
174 174
    def getcwd(self):
        # current working directory, expressed relative to the repo root
        return self.dirstate.getcwd()
177 177
    def wfile(self, f, mode='r'):
        # open a file from the working directory (no filtering applied)
        return self.wopener(f, mode)
180 180
181 181 def wread(self, filename):
182 182 if self.encodepats == None:
183 183 l = []
184 184 for pat, cmd in self.ui.configitems("encode"):
185 185 mf = util.matcher("", "/", [pat], [], [])[1]
186 186 l.append((mf, cmd))
187 187 self.encodepats = l
188 188
189 189 data = self.wopener(filename, 'r').read()
190 190
191 191 for mf, cmd in self.encodepats:
192 192 if mf(filename):
193 193 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
194 194 data = util.filter(data, cmd)
195 195 break
196 196
197 197 return data
198 198
199 199 def wwrite(self, filename, data, fd=None):
200 200 if self.decodepats == None:
201 201 l = []
202 202 for pat, cmd in self.ui.configitems("decode"):
203 203 mf = util.matcher("", "/", [pat], [], [])[1]
204 204 l.append((mf, cmd))
205 205 self.decodepats = l
206 206
207 207 for mf, cmd in self.decodepats:
208 208 if mf(filename):
209 209 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
210 210 data = util.filter(data, cmd)
211 211 break
212 212
213 213 if fd:
214 214 return fd.write(data)
215 215 return self.wopener(filename, 'w').write(data)
216 216
    def transaction(self):
        """Open a new transaction on the repository store.

        The current dirstate is first saved to journal.dirstate so undo
        can restore it; when the transaction completes, the journal files
        are renamed to undo files.
        """
        # save dirstate for undo
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (fresh repository)
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        def after():
            # keep the journal around as "undo" after a successful close
            util.rename(self.join("journal"), self.join("undo"))
            util.rename(self.join("journal.dirstate"),
                        self.join("undo.dirstate"))

        return transaction.transaction(self.ui.warn, self.opener,
                                       self.join("journal"), after)
232 232
    def recover(self):
        """Roll back an interrupted transaction, if one exists.

        Returns True when a journal was found and rolled back.
        """
        lock = self.lock()
        if os.path.exists(self.join("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.opener, self.join("journal"))
            # discard in-memory revlog state that may reference undone data
            self.manifest = manifest.manifest(self.opener)
            self.changelog = changelog.changelog(self.opener)
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
244 244
    def undo(self, wlock=None):
        """Roll back the last completed transaction using the undo files."""
        if not wlock:
            wlock = self.wlock()
        lock = self.lock()
        if os.path.exists(self.join("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.opener, self.join("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            # reload the dirstate from the restored file
            self.dirstate.read()
        else:
            self.ui.warn(_("no undo information available\n"))
256 256
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None):
        """Acquire the named repository lock file.

        A non-blocking acquire is attempted first; if the lock is held
        and wait is true, the holder is reported and we block.  releasefn
        runs when the lock is released; acquirefn runs right after a
        successful acquisition.
        """
        try:
            l = lock.lock(self.join(lockname), 0, releasefn)
        except lock.LockHeld, inst:
            if not wait:
                raise inst
            self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
            l = lock.lock(self.join(lockname), wait, releasefn)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=1):
        # the store lock, protecting changelog/manifest/filelog writes
        return self.do_lock("lock", wait)

    def wlock(self, wait=1):
        # the working-directory lock; the dirstate is flushed on release
        # and re-read on acquisition
        return self.do_lock("wlock", wait,
                            self.dirstate.write,
                            self.dirstate.read)
276 276
277 277 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
278 278 "determine whether a new filenode is needed"
279 279 fp1 = manifest1.get(filename, nullid)
280 280 fp2 = manifest2.get(filename, nullid)
281 281
282 282 if fp2 != nullid:
283 283 # is one parent an ancestor of the other?
284 284 fpa = filelog.ancestor(fp1, fp2)
285 285 if fpa == fp1:
286 286 fp1, fp2 = fp2, nullid
287 287 elif fpa == fp2:
288 288 fp2 = nullid
289 289
290 290 # is the file unmodified from the parent? report existing entry
291 291 if fp2 == nullid and text == filelog.read(fp1):
292 292 return (fp1, None, None)
293 293
294 294 return (None, fp1, fp2)
295 295
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        """Commit the given files directly, bypassing the dirstate's
        notion of what changed (used by import and debug commands).

        The dirstate is only updated when p1 is its current first parent.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        # only touch the dirstate when committing on top of it
        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        if not wlock:
            wlock = self.wlock()
        lock = self.lock()
        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # file unchanged from a parent: reuse that filenode
                    mm[f] = entry
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # file missing from the working dir: drop it entirely
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
351 351
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, wlock=None):
        """Commit working-directory changes as a new changeset.

        With an explicit file list, only those files are examined;
        otherwise everything the dirstate reports as changed is used.
        Returns the new changeset node, or None if nothing was committed
        (no changes, or the user aborted by leaving the message empty).
        """
        commit = []
        remove = []
        changed = []

        if files:
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            modified, added, removed, deleted, unknown = self.changes(match=match)
            commit = modified + added
            remove = removed

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        # a merge (p2 set) is always committed, even with no file changes
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status(_("nothing changed\n"))
            return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wread(f)
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

            r = self.file(f)

            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                # record the copy source and its filenode in the metadata
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
                fp1, fp2 = nullid, nullid
            else:
                entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # unchanged from a parent: reuse the existing filenode
                    new[f] = entry
                    continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1 = m1.copy()
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        if not text:
            # no message supplied: compose a template and run the editor
            edittext = [""]
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            edittext = self.ui.edit("\n".join(edittext))
            os.chdir(olddir)
            if not edittext.rstrip():
                # an empty message aborts the commit
                return None
            text = edittext

        user = user or self.ui.username()
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
470 470
    def walk(self, node=None, files=[], match=util.always):
        """Yield (source, filename) pairs for files accepted by match.

        With a node, walk that revision's manifest (source 'm');
        otherwise delegate to the dirstate walk of the working directory.
        NOTE(review): the mutable default for `files` is never mutated
        here, so it is harmless in practice.
        """
        if node:
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                # tick off explicitly-requested files as we see them
                fdict.pop(fn, None)
                if match(fn):
                    yield 'm', fn
            # anything left was requested but absent from that revision
            for fn in fdict:
                self.ui.warn(_('%s: No such file in rev %s\n') % (
                    util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match):
                yield src, fn
484 484
    def changes(self, node1=None, node2=None, files=[], match=util.always,
                wlock=None):
        """return changes between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a (modified, added, removed, deleted, unknown) tuple of
        sorted filename lists.
        """

        def fcmp(fn, mf):
            # full-content comparison of working file vs manifest version
            t1 = self.wread(fn)
            t2 = self.file(fn).read(mf.get(fn, nullid))
            return cmp(t1, t2)

        def mfmatches(node):
            # manifest of `node`, restricted to files accepted by `match`
            change = self.changelog.read(node)
            mf = dict(self.manifest.read(change[0]))
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                try:
                    # the wlock lets us write cleaned-up lookup state back
                    # into the dirstate; proceed without it if held elsewhere
                    wlock = self.wlock(wait=0)
                except lock.LockHeld:
                    wlock = None
            lookup, modified, added, removed, deleted, unknown = (
                self.dirstate.changes(files, match))

            # are we comparing working dir against its parent?
            if not node1:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        elif wlock is not None:
                            # file is unchanged: record that in the dirstate
                            self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            deleted, unknown = [], []
            mf2 = mfmatches(node2)

        if node1:
            # flush lists from dirstate before comparing manifests
            modified, added = [], []

            mf1 = mfmatches(node1)

            for fn in mf2:
                if mf1.has_key(fn):
                    if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
                        modified.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever is left in mf1 vanished between node1 and node2
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown:
            l.sort()
        return (modified, added, removed, deleted, unknown)
560 560
561 561 def add(self, list, wlock=None):
562 562 if not wlock:
563 563 wlock = self.wlock()
564 564 for f in list:
565 565 p = self.wjoin(f)
566 566 if not os.path.exists(p):
567 567 self.ui.warn(_("%s does not exist!\n") % f)
568 568 elif not os.path.isfile(p):
569 569 self.ui.warn(_("%s not added: only files supported currently\n")
570 570 % f)
571 571 elif self.dirstate.state(f) in 'an':
572 572 self.ui.warn(_("%s already tracked!\n") % f)
573 573 else:
574 574 self.dirstate.update([f], "a")
575 575
576 576 def forget(self, list, wlock=None):
577 577 if not wlock:
578 578 wlock = self.wlock()
579 579 for f in list:
580 580 if self.dirstate.state(f) not in 'ai':
581 581 self.ui.warn(_("%s not added!\n") % f)
582 582 else:
583 583 self.dirstate.forget([f])
584 584
    def remove(self, list, unlink=False, wlock=None):
        """Schedule files for removal; with unlink, delete them too."""
        if unlink:
            # delete the working copies first so the existence check
            # below sees the files as gone
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # a file already missing is fine; anything else is not
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                # never committed: simply forget it
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")
605 605
606 606 def undelete(self, list, wlock=None):
607 607 p = self.dirstate.parents()[0]
608 608 mn = self.changelog.read(p)[0]
609 609 mf = self.manifest.readflags(mn)
610 610 m = self.manifest.read(mn)
611 611 if not wlock:
612 612 wlock = self.wlock()
613 613 for f in list:
614 614 if self.dirstate.state(f) not in "r":
615 615 self.ui.warn("%s not removed!\n" % f)
616 616 else:
617 617 t = self.file(f).read(m[f])
618 618 self.wwrite(f, t)
619 619 util.set_exec(self.wjoin(f), mf[f])
620 620 self.dirstate.update([f], "n")
621 621
622 622 def copy(self, source, dest, wlock=None):
623 623 p = self.wjoin(dest)
624 624 if not os.path.exists(p):
625 625 self.ui.warn(_("%s does not exist!\n") % dest)
626 626 elif not os.path.isfile(p):
627 627 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
628 628 else:
629 629 if not wlock:
630 630 wlock = self.wlock()
631 631 if self.dirstate.state(dest) == '?':
632 632 self.dirstate.update([dest], "a")
633 633 self.dirstate.copy(source, dest)
634 634
635 635 def heads(self, start=None):
636 636 heads = self.changelog.heads(start)
637 637 # sort the output in rev descending order
638 638 heads = [(-self.changelog.rev(h), h) for h in heads]
639 639 heads.sort()
640 640 return [n for (r, n) in heads]
641 641
    # branchlookup returns a dict giving a list of branches for
    # each head. A branch is defined as the tag of a node or
    # the branch of the node's parents. If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph: a->b->c->d->e
    # \ /
    # aa -----/
    # a has tag 2.6.12
    # d has tag 2.6.13
    # e would have branch tags for 2.6.12 and 2.6.13. Because the node
    # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
    # from the list.
    #
    # It is possible that more than one head will have the same branch tag.
    # callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (ie checkout of a specific
    # branch).
    #
    # passing in a specific branch will limit the depth of the search
    # through the parents. It won't limit the branches returned in the
    # result though.
    def branchlookup(self, heads=None, branch=None):
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume a traversal saved at a merge point
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        # this tag is visible from everything found so far
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        # we were only asked about this branch: stop here
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    # remember the second parent for a later traversal
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # memoized set of tag nodes reachable from `node`
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
747 747
748 748 def branches(self, nodes):
749 749 if not nodes:
750 750 nodes = [self.changelog.tip()]
751 751 b = []
752 752 for n in nodes:
753 753 t = n
754 754 while n:
755 755 p = self.changelog.parents(n)
756 756 if p[1] != nullid or p[0] == nullid:
757 757 b.append((t, n, p[0], p[1]))
758 758 break
759 759 n = p[0]
760 760 return b
761 761
762 762 def between(self, pairs):
763 763 r = []
764 764
765 765 for top, bottom in pairs:
766 766 n, l, i = top, [], 0
767 767 f = 1
768 768
769 769 while n != bottom:
770 770 p = self.changelog.parents(n)[0]
771 771 if i == f:
772 772 l.append(n)
773 773 f = f * 2
774 774 n = p
775 775 i += 1
776 776
777 777 r.append(l)
778 778
779 779 return r
780 780
    def findincoming(self, remote, base=None, heads=None):
        """Find changesets present in remote but not here.

        Fills `base` (if given) with nodes known to both sides and
        returns the list of earliest-unknown nodes to fetch, or None if
        nothing is missing.
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        if not heads:
            heads = remote.heads()

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            # every remote head is already known locally
            return None

        rep = {}
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid:
                    break
                if n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                if n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            base[n[2]] = 1 # latest known
                            continue

                    # queue unseen parents for the next batched request
                    for a in n[2:4]:
                        if a not in rep:
                            r.append(a)
                            rep[a] = 1

                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                # ask about parents in batches of 10 to bound request size
                for p in range(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        if b[0] in m:
                            self.ui.debug(_("found base node %s\n")
                                          % short(b[0]))
                            base[b[0]] = 1
                        elif b[0] not in seen:
                            unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            self.ui.warn(_("warning: pulling from an unrelated repository!\n"))

        self.ui.note(_("found new changesets starting at ") +
                     " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
902 902
    def findoutgoing(self, remote, base=None, heads=None):
        """Return the roots of the set of changesets remote lacks.

        If base is supplied it must already contain the common nodes
        (as filled in by findincoming); otherwise they are computed here.
        """
        if base == None:
            base = {}
            self.findincoming(remote, base, heads)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)

        # this is the set of all roots we have to push
        return subset
932 932
    def pull(self, remote, heads=None):
        """Pull missing changesets from remote into this repository."""
        lock = self.lock()

        # if we have an empty repo, fetch everything
        if self.changelog.tip() == nullid:
            self.ui.status(_("requesting all changes\n"))
            fetch = [nullid]
        else:
            fetch = self.findincoming(remote)

        if not fetch:
            self.ui.status(_("no changes found\n"))
            return 1

        if heads is None:
            cg = remote.changegroup(fetch, 'pull')
        else:
            # limit the pull to ancestors of the requested heads
            cg = remote.changegroupsubset(fetch, heads, 'pull')
        return self.addchangegroup(cg)
952 952
    def push(self, remote, force=False):
        """Push local changesets to remote.

        Returns 1 when nothing was pushed (no changes, or refused
        because the remote has unknown changes / would grow new heads).
        """
        lock = remote.lock()

        base = {}
        heads = remote.heads()
        inc = self.findincoming(remote, base, heads)
        if not force and inc:
            # refuse to push over changes we have not pulled yet
            self.ui.warn(_("abort: unsynced remote changes!\n"))
            self.ui.status(_("(did you forget to sync? use push -f to force)\n"))
            return 1

        update = self.findoutgoing(remote, base)
        if not update:
            self.ui.status(_("no changes found\n"))
            return 1
        elif not force:
            # more local heads than remote heads means the push would
            # create a new remote branch head
            if len(heads) < len(self.changelog.heads()):
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return 1

        cg = self.changegroup(update, 'push')
        return remote.addchangegroup(cg)
977 977
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        bases:  changenodes assumed to already be present on the recipient
        heads:  changenodes the generated group must reach
        source: tag passed through to the 'preoutgoing'/'outgoing' hooks

        Returns a util.chunkbuffer wrapping the changegroup stream."""

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield struct.pack(">l", len(fname) + 4) + fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield struct.pack(">l", 0)

            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1247 1247
1248 1248 def changegroup(self, basenodes, source):
1249 1249 """Generate a changegroup of all nodes that we have that a recipient
1250 1250 doesn't.
1251 1251
1252 1252 This is much easier than the previous function as we can assume that
1253 1253 the recipient has any changenode we aren't sending them."""
1254 1254
1255 1255 self.hook('preoutgoing', throw=True, source=source)
1256 1256
1257 1257 cl = self.changelog
1258 1258 nodes = cl.nodesbetween(basenodes, None)[0]
1259 1259 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1260 1260
1261 1261 def identity(x):
1262 1262 return x
1263 1263
1264 1264 def gennodelst(revlog):
1265 1265 for r in xrange(0, revlog.count()):
1266 1266 n = revlog.node(r)
1267 1267 if revlog.linkrev(n) in revset:
1268 1268 yield n
1269 1269
1270 1270 def changed_file_collector(changedfileset):
1271 1271 def collect_changed_files(clnode):
1272 1272 c = cl.read(clnode)
1273 1273 for fname in c[3]:
1274 1274 changedfileset[fname] = 1
1275 1275 return collect_changed_files
1276 1276
1277 1277 def lookuprevlink_func(revlog):
1278 1278 def lookuprevlink(n):
1279 1279 return cl.node(revlog.linkrev(n))
1280 1280 return lookuprevlink
1281 1281
1282 1282 def gengroup():
1283 1283 # construct a list of all changed files
1284 1284 changedfiles = {}
1285 1285
1286 1286 for chnk in cl.group(nodes, identity,
1287 1287 changed_file_collector(changedfiles)):
1288 1288 yield chnk
1289 1289 changedfiles = changedfiles.keys()
1290 1290 changedfiles.sort()
1291 1291
1292 1292 mnfst = self.manifest
1293 1293 nodeiter = gennodelst(mnfst)
1294 1294 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1295 1295 yield chnk
1296 1296
1297 1297 for fname in changedfiles:
1298 1298 filerevlog = self.file(fname)
1299 1299 nodeiter = gennodelst(filerevlog)
1300 1300 nodeiter = list(nodeiter)
1301 1301 if nodeiter:
1302 1302 yield struct.pack(">l", len(fname) + 4) + fname
1303 1303 lookup = lookuprevlink_func(filerevlog)
1304 1304 for chnk in filerevlog.group(nodeiter, lookup):
1305 1305 yield chnk
1306 1306
1307 1307 yield struct.pack(">l", 0)
1308 1308 self.hook('outgoing', node=hex(nodes[0]), source=source)
1309 1309
1310 1310 return util.chunkbuffer(gengroup())
1311 1311
    def addchangegroup(self, source):
        """Apply a changegroup read from source (a file-like object in
        the stream format produced by changegroup()/changegroupsubset()).

        Changesets, manifests and file revisions are added inside a
        single transaction; the 'prechangegroup', 'pretxnchangegroup',
        'changegroup' and 'incoming' hooks are fired along the way."""

        # Read one length-prefixed chunk: a 4-byte big-endian length
        # (which includes the prefix itself) followed by the payload.
        # Returns "" at end of stream or on an empty terminator chunk.
        def getchunk():
            d = source.read(4)
            if not d:
                return ""
            l = struct.unpack(">l", d)[0]
            if l <= 4:
                return ""
            d = source.read(l - 4)
            if len(d) < l - 4:
                raise repo.RepoError(_("premature EOF reading chunk"
                                       " (got %d bytes, expected %d)")
                                     % (len(d), l - 4))
            return d

        # Yield chunks until the empty terminator chunk.
        def getgroup():
            while 1:
                c = getchunk()
                if not c:
                    break
                yield c

        # Link-revision callback for changelog.addgroup: each incoming
        # changeset links to the revision slot it is about to occupy.
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return self.changelog.count()

        # Link-revision callback for manifest/filelog groups: map the
        # owning changenode to its (already added) changelog revision.
        def revmap(x):
            return self.changelog.rev(x)

        if not source:
            return

        self.hook('prechangegroup', throw=True)

        changesets = files = revisions = 0

        # NOTE(review): no explicit abort if an exception escapes below;
        # presumably the journal of an unclosed transaction is recovered
        # elsewhere -- confirm.
        tr = self.transaction()

        oldheads = len(self.changelog.heads())

        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        co = self.changelog.tip()
        cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
        cnr, cor = map(self.changelog.rev, (cn, co))
        if cn == nullid:
            # empty group: pretend nothing was added
            cnr = cor
        changesets = cnr - cor

        # pull off the manifest group
        # (mm/mo are not used again; addgroup is called for its effect)
        self.ui.status(_("adding manifests\n"))
        mm = self.manifest.tip()
        mo = self.manifest.addgroup(getgroup(), revmap, tr)

        # process the files
        self.ui.status(_("adding file changes\n"))
        while 1:
            # each file group is preceded by a chunk holding its name
            f = getchunk()
            if not f:
                break
            self.ui.debug(_("adding %s revisions\n") % f)
            fl = self.file(f)
            o = fl.count()
            n = fl.addgroup(getgroup(), revmap, tr)
            revisions += fl.count() - o
            files += 1

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads > oldheads:
            heads = _(" (+%d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        # NOTE(review): fires even when no changesets were added, in
        # which case node(cor+1) points past the tip -- verify callers.
        self.hook('pretxnchangegroup', throw=True,
                  node=hex(self.changelog.node(cor+1)))

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)))

            for i in range(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)))
1399 1399
    def update(self, node, allow=False, force=False, choose=None,
               moddirstate=True, forcemerge=False, wlock=None):
        """Update the working directory to changeset node.

        allow       - permit a branch merge (otherwise updates that span
                      branches are refused)
        force       - clobber/discard local changes
        choose      - optional predicate limiting which files are touched
        moddirstate - whether to record the result in the dirstate
        forcemerge  - merge even with outstanding local changes
        wlock       - a working-dir lock the caller already holds, if any
                      (one is acquired here when moddirstate and none given)

        Returns 1 on refusal, otherwise a boolean error flag reflecting
        whether any file merge failed.
        Raises util.Abort for nothing-to-merge / uncommitted-changes /
        conflicting-unknown-file situations."""
        pl = self.dirstate.parents()
        if not force and pl[1] != nullid:
            self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
            return 1

        err = False

        # p1: first dirstate parent, p2: the update target
        p1, p2 = pl[0], node
        pa = self.changelog.ancestor(p1, p2)
        m1n = self.changelog.read(p1)[0]
        m2n = self.changelog.read(p2)[0]
        man = self.manifest.ancestor(m1n, m2n)
        m1 = self.manifest.read(m1n)
        mf1 = self.manifest.readflags(m1n)
        m2 = self.manifest.read(m2n).copy()
        mf2 = self.manifest.readflags(m2n)
        ma = self.manifest.read(man)
        mfa = self.manifest.readflags(man)

        modified, added, removed, deleted, unknown = self.changes()

        # is this a jump, or a merge? i.e. is there a linear path
        # from p1 to p2?
        linear_path = (pa == p1 or pa == p2)

        if allow and linear_path:
            raise util.Abort(_("there is nothing to merge, "
                               "just use 'hg update'"))
        if allow and not forcemerge:
            if modified or added or removed:
                raise util.Abort(_("outstanding uncommited changes"))
        if not forcemerge and not force:
            # refuse to overwrite unknown files that differ from the
            # incoming revision
            for f in unknown:
                if f in m2:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) != 0:
                        raise util.Abort(_("'%s' already exists in the working"
                                           " dir and differs from remote") % f)

        # resolve the manifest to determine which files
        # we care about merging
        self.ui.note(_("resolving manifests\n"))
        self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
                      (force, allow, moddirstate, linear_path))
        self.ui.debug(_(" ancestor %s local %s remote %s\n") %
                      (short(man), short(m1n), short(m2n)))

        merge = {}
        get = {}
        remove = []

        # construct a working dir manifest
        mw = m1.copy()
        mfw = mf1.copy()
        umap = dict.fromkeys(unknown)

        for f in added + modified + unknown:
            mw[f] = ""
            mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

        if moddirstate and not wlock:
            wlock = self.wlock()

        for f in deleted + removed:
            if f in mw:
                del mw[f]

            # If we're jumping between revisions (as opposed to merging),
            # and if neither the working directory nor the target rev has
            # the file, then we need to remove it from the dirstate, to
            # prevent the dirstate from listing the file when it is no
            # longer in the manifest.
            if moddirstate and linear_path and f not in m2:
                self.dirstate.forget((f,))

        # Compare manifests
        for f, n in mw.iteritems():
            if choose and not choose(f):
                continue
            if f in m2:
                s = 0

                # is the wfile new since m1, and match m2?
                if f not in m1:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) == 0:
                        n = m2[f]
                    del t1, t2

                # are files different?
                if n != m2[f]:
                    a = ma.get(f, nullid)
                    # are both different from the ancestor?
                    if n != a and m2[f] != a:
                        self.ui.debug(_(" %s versions differ, resolve\n") % f)
                        # merge executable bits
                        # "if we changed or they changed, change in merge"
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        merge[f] = (m1.get(f, nullid), m2[f], mode)
                        s = 1
                    # are we clobbering?
                    # is remote's version newer?
                    # or are we going back in time?
                    elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                        self.ui.debug(_(" remote %s is newer, get\n") % f)
                        get[f] = m2[f]
                        s = 1
                elif f in umap:
                    # this unknown file is the same as the checkout
                    get[f] = m2[f]

                if not s and mfw[f] != mf2[f]:
                    if force:
                        self.ui.debug(_(" updating permissions for %s\n") % f)
                        util.set_exec(self.wjoin(f), mf2[f])
                    else:
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        if mode != b:
                            self.ui.debug(_(" updating permissions for %s\n")
                                          % f)
                            util.set_exec(self.wjoin(f), mode)
                del m2[f]
            elif f in ma:
                if n != ma[f]:
                    r = _("d")
                    if not force and (linear_path or allow):
                        r = self.ui.prompt(
                            (_(" local changed %s which remote deleted\n") % f) +
                            _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                    if r == _("d"):
                        remove.append(f)
                else:
                    self.ui.debug(_("other deleted %s\n") % f)
                    remove.append(f) # other deleted it
            else:
                # file is created on branch or in working directory
                if force and f not in umap:
                    self.ui.debug(_("remote deleted %s, clobbering\n") % f)
                    remove.append(f)
                elif n == m1.get(f, nullid): # same as parent
                    if p2 == pa: # going backwards?
                        self.ui.debug(_("remote deleted %s\n") % f)
                        remove.append(f)
                    else:
                        self.ui.debug(_("local modified %s, keeping\n") % f)
                else:
                    self.ui.debug(_("working dir created %s, keeping\n") % f)

        # m2 now holds only files not present in the working manifest
        for f, n in m2.iteritems():
            if choose and not choose(f):
                continue
            if f[0] == "/":
                continue
            if f in ma and n != ma[f]:
                r = _("k")
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        (_("remote changed %s which local deleted\n") % f) +
                        _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                if r == _("k"):
                    get[f] = n
            elif f not in ma:
                self.ui.debug(_("remote created %s\n") % f)
                get[f] = n
            else:
                if force or p2 == pa: # going backwards?
                    self.ui.debug(_("local deleted %s, recreating\n") % f)
                    get[f] = n
                else:
                    self.ui.debug(_("local deleted %s\n") % f)

        del mw, m1, m2, ma

        if force:
            # with --force everything scheduled for merge is simply taken
            # from the remote side
            for f in merge:
                get[f] = merge[f][1]
            merge = {}

        if linear_path or force:
            # we don't need to do any magic, just jump to the new rev
            branch_merge = False
            p1, p2 = p2, nullid
        else:
            if not allow:
                self.ui.status(_("this update spans a branch"
                                 " affecting the following files:\n"))
                fl = merge.keys() + get.keys()
                fl.sort()
                for f in fl:
                    cf = ""
                    if f in merge:
                        cf = _(" (resolve)")
                    self.ui.status(" %s%s\n" % (f, cf))
                self.ui.warn(_("aborting update spanning branches!\n"))
                self.ui.status(_("(use update -m to merge across branches"
                                 " or -C to lose changes)\n"))
                return 1
            branch_merge = True

        # get the files we don't need to change
        files = get.keys()
        files.sort()
        for f in files:
            if f[0] == "/":
                continue
            self.ui.note(_("getting %s\n") % f)
            t = self.file(f).read(get[f])
            self.wwrite(f, t)
            util.set_exec(self.wjoin(f), mf2[f])
            if moddirstate:
                if branch_merge:
                    self.dirstate.update([f], 'n', st_mtime=-1)
                else:
                    self.dirstate.update([f], 'n')

        # merge the tricky bits
        files = merge.keys()
        files.sort()
        for f in files:
            self.ui.status(_("merging %s\n") % f)
            my, other, flag = merge[f]
            ret = self.merge3(f, my, other)
            if ret:
                err = True
            util.set_exec(self.wjoin(f), flag)
            if moddirstate:
                if branch_merge:
                    # We've done a branch merge, mark this file as merged
                    # so that we properly record the merger later
                    self.dirstate.update([f], 'm')
                else:
                    # We've update-merged a locally modified file, so
                    # we set the dirstate to emulate a normal checkout
                    # of that file some time in the past. Thus our
                    # merge will appear as a normal local file
                    # modification.
                    f_len = len(self.file(f).read(other))
                    self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)

        remove.sort()
        for f in remove:
            self.ui.note(_("removing %s\n") % f)
            try:
                util.unlink(self.wjoin(f))
            except OSError, inst:
                # missing files were already gone; report anything else
                if inst.errno != errno.ENOENT:
                    self.ui.warn(_("update failed to remove %s: %s!\n") %
                                 (f, inst.strerror))
        if moddirstate:
            if branch_merge:
                self.dirstate.update(remove, 'r')
            else:
                self.dirstate.forget(remove)

        if moddirstate:
            self.dirstate.setparents(p1, p2)
        return err
1663 1663
1664 1664 def merge3(self, fn, my, other):
1665 1665 """perform a 3-way merge in the working directory"""
1666 1666
1667 1667 def temp(prefix, node):
1668 1668 pre = "%s~%s." % (os.path.basename(fn), prefix)
1669 1669 (fd, name) = tempfile.mkstemp("", pre)
1670 1670 f = os.fdopen(fd, "wb")
1671 1671 self.wwrite(fn, fl.read(node), f)
1672 1672 f.close()
1673 1673 return name
1674 1674
1675 1675 fl = self.file(fn)
1676 1676 base = fl.ancestor(my, other)
1677 1677 a = self.wjoin(fn)
1678 1678 b = temp("base", base)
1679 1679 c = temp("other", other)
1680 1680
1681 1681 self.ui.note(_("resolving %s\n") % fn)
1682 1682 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1683 1683 (fn, short(my), short(other), short(base)))
1684 1684
1685 1685 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1686 1686 or "hgmerge")
1687 1687 r = os.system('%s "%s" "%s" "%s"' % (cmd, a, b, c))
1688 1688 if r:
1689 1689 self.ui.warn(_("merging %s failed!\n") % fn)
1690 1690
1691 1691 os.unlink(b)
1692 1692 os.unlink(c)
1693 1693 return r
1694 1694
1695 1695 def verify(self):
1696 1696 filelinkrevs = {}
1697 1697 filenodes = {}
1698 1698 changesets = revisions = files = 0
1699 1699 errors = [0]
1700 1700 neededmanifests = {}
1701 1701
1702 1702 def err(msg):
1703 1703 self.ui.warn(msg + "\n")
1704 1704 errors[0] += 1
1705 1705
1706 1706 def checksize(obj, name):
1707 1707 d = obj.checksize()
1708 1708 if d[0]:
1709 1709 err(_("%s data length off by %d bytes") % (name, d[0]))
1710 1710 if d[1]:
1711 1711 err(_("%s index contains %d extra bytes") % (name, d[1]))
1712 1712
1713 1713 seen = {}
1714 1714 self.ui.status(_("checking changesets\n"))
1715 1715 checksize(self.changelog, "changelog")
1716 1716
1717 1717 for i in range(self.changelog.count()):
1718 1718 changesets += 1
1719 1719 n = self.changelog.node(i)
1720 1720 l = self.changelog.linkrev(n)
1721 1721 if l != i:
1722 1722 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1723 1723 if n in seen:
1724 1724 err(_("duplicate changeset at revision %d") % i)
1725 1725 seen[n] = 1
1726 1726
1727 1727 for p in self.changelog.parents(n):
1728 1728 if p not in self.changelog.nodemap:
1729 1729 err(_("changeset %s has unknown parent %s") %
1730 1730 (short(n), short(p)))
1731 1731 try:
1732 1732 changes = self.changelog.read(n)
1733 1733 except KeyboardInterrupt:
1734 1734 self.ui.warn(_("interrupted"))
1735 1735 raise
1736 1736 except Exception, inst:
1737 1737 err(_("unpacking changeset %s: %s") % (short(n), inst))
1738 1738
1739 1739 neededmanifests[changes[0]] = n
1740 1740
1741 1741 for f in changes[3]:
1742 1742 filelinkrevs.setdefault(f, []).append(i)
1743 1743
1744 1744 seen = {}
1745 1745 self.ui.status(_("checking manifests\n"))
1746 1746 checksize(self.manifest, "manifest")
1747 1747
1748 1748 for i in range(self.manifest.count()):
1749 1749 n = self.manifest.node(i)
1750 1750 l = self.manifest.linkrev(n)
1751 1751
1752 1752 if l < 0 or l >= self.changelog.count():
1753 1753 err(_("bad manifest link (%d) at revision %d") % (l, i))
1754 1754
1755 1755 if n in neededmanifests:
1756 1756 del neededmanifests[n]
1757 1757
1758 1758 if n in seen:
1759 1759 err(_("duplicate manifest at revision %d") % i)
1760 1760
1761 1761 seen[n] = 1
1762 1762
1763 1763 for p in self.manifest.parents(n):
1764 1764 if p not in self.manifest.nodemap:
1765 1765 err(_("manifest %s has unknown parent %s") %
1766 1766 (short(n), short(p)))
1767 1767
1768 1768 try:
1769 1769 delta = mdiff.patchtext(self.manifest.delta(n))
1770 1770 except KeyboardInterrupt:
1771 1771 self.ui.warn(_("interrupted"))
1772 1772 raise
1773 1773 except Exception, inst:
1774 1774 err(_("unpacking manifest %s: %s") % (short(n), inst))
1775 1775
1776 1776 ff = [ l.split('\0') for l in delta.splitlines() ]
1777 1777 for f, fn in ff:
1778 1778 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1779 1779
1780 1780 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1781 1781
1782 1782 for m, c in neededmanifests.items():
1783 1783 err(_("Changeset %s refers to unknown manifest %s") %
1784 1784 (short(m), short(c)))
1785 1785 del neededmanifests
1786 1786
1787 1787 for f in filenodes:
1788 1788 if f not in filelinkrevs:
1789 1789 err(_("file %s in manifest but not in changesets") % f)
1790 1790
1791 1791 for f in filelinkrevs:
1792 1792 if f not in filenodes:
1793 1793 err(_("file %s in changeset but not in manifest") % f)
1794 1794
1795 1795 self.ui.status(_("checking files\n"))
1796 1796 ff = filenodes.keys()
1797 1797 ff.sort()
1798 1798 for f in ff:
1799 1799 if f == "/dev/null":
1800 1800 continue
1801 1801 files += 1
1802 1802 fl = self.file(f)
1803 1803 checksize(fl, f)
1804 1804
1805 1805 nodes = {nullid: 1}
1806 1806 seen = {}
1807 1807 for i in range(fl.count()):
1808 1808 revisions += 1
1809 1809 n = fl.node(i)
1810 1810
1811 1811 if n in seen:
1812 1812 err(_("%s: duplicate revision %d") % (f, i))
1813 1813 if n not in filenodes[f]:
1814 1814 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1815 1815 else:
1816 1816 del filenodes[f][n]
1817 1817
1818 1818 flr = fl.linkrev(n)
1819 1819 if flr not in filelinkrevs[f]:
1820 1820 err(_("%s:%s points to unexpected changeset %d")
1821 1821 % (f, short(n), flr))
1822 1822 else:
1823 1823 filelinkrevs[f].remove(flr)
1824 1824
1825 1825 # verify contents
1826 1826 try:
1827 1827 t = fl.read(n)
1828 1828 except KeyboardInterrupt:
1829 1829 self.ui.warn(_("interrupted"))
1830 1830 raise
1831 1831 except Exception, inst:
1832 1832 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1833 1833
1834 1834 # verify parents
1835 1835 (p1, p2) = fl.parents(n)
1836 1836 if p1 not in nodes:
1837 1837 err(_("file %s:%s unknown parent 1 %s") %
1838 1838 (f, short(n), short(p1)))
1839 1839 if p2 not in nodes:
1840 1840 err(_("file %s:%s unknown parent 2 %s") %
1841 1841 (f, short(n), short(p1)))
1842 1842 nodes[n] = 1
1843 1843
1844 1844 # cross-check
1845 1845 for node in filenodes[f]:
1846 1846 err(_("node %s in manifests not in %s") % (hex(node), f))
1847 1847
1848 1848 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1849 1849 (files, changesets, revisions))
1850 1850
1851 1851 if errors[0]:
1852 1852 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1853 1853 return 1
General Comments 0
You need to be logged in to leave comments. Login now