##// END OF EJS Templates
Pass the correct username as $HGUSER to hgeditor when "commit -u" is used.
Thomas Arendsen Hein -
r1983:ae12a815 default
parent child Browse files
Show More
@@ -1,1894 +1,1894 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import os, util
9 9 import filelog, manifest, changelog, dirstate, repo
10 10 from node import *
11 11 from i18n import gettext as _
12 12 from demandload import *
13 13 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 14 demandload(globals(), "changegroup")
15 15
16 16 class localrepository(object):
17 17 def __del__(self):
18 18 self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or create) the repository at path.

        If path is None, walk upward from the current directory until a
        ".hg" directory is found.  Raises repo.RepoError if no repository
        can be located, or if it does not exist and create is false.
        """
        if not path:
            # search upward for a directory containing ".hg"
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    # reached the filesystem root without finding a repo
                    raise repo.RepoError(_("no repo found"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise repo.RepoError(_("repository %s not found") % path)

        self.root = os.path.abspath(path)
        self.ui = ui.ui(parentui=parentui)
        self.opener = util.opener(self.path)    # opens files under .hg
        self.wopener = util.opener(self.root)   # opens working-dir files
        self.manifest = manifest.manifest(self.opener)
        self.changelog = changelog.changelog(self.opener)
        # lazily-built caches; see tags() / nodetags()
        self.tagscache = None
        self.nodetagscache = None
        # filter patterns for wread()/wwrite(); built on first use
        self.encodepats = None
        self.decodepats = None
        # currently running transaction, if any (see transaction())
        self.transhandle = None

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
        try:
            # per-repository configuration is optional
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            pass
54 54
    def hook(self, name, throw=False, **args):
        """Run all configured hooks matching name; return overall success.

        Each keyword argument is exported to the hook's environment twice:
        as HG_<KEY> and as plain <KEY> (both upper-cased).  If throw is
        true, a failing hook raises util.Abort instead of returning False.
        """
        def runhook(name, cmd):
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()] +
                       [(k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('error: %s hook %s\n') % (name, desc))
                return False
            return True

        r = True
        # a [hooks] entry "name" or "name.suffix" matches this call
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            # run every hook even after a failure; remember any failure
            r = runhook(hname, cmd) and r
        return r
76 76
    def tags(self):
        '''return a mapping of tag to node'''
        if not self.tagscache:
            self.tagscache = {}
            def addtag(self, k, n):
                # record tag k -> binary node; unparsable nodes map to ''
                try:
                    bin_n = bin(n)
                except TypeError:
                    bin_n = ''
                self.tagscache[k.strip()] = bin_n

            try:
                # read each head of the tags file, ending with the tip
                # and add each tag found to the map, with "newer" ones
                # taking precedence
                fl = self.file(".hgtags")
                h = fl.heads()
                h.reverse()
                for r in h:
                    for l in fl.read(r).splitlines():
                        if l:
                            n, k = l.split(" ", 1)
                            addtag(self, k, n)
            except KeyError:
                # no .hgtags filelog exists yet
                pass

            try:
                # localtags are not version-controlled; they override .hgtags
                f = self.opener("localtags")
                for l in f:
                    n, k = l.split(" ", 1)
                    addtag(self, k, n)
            except IOError:
                pass

            # the implicit 'tip' tag always wins
            self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
114 114
115 115 def tagslist(self):
116 116 '''return a list of tags ordered by revision'''
117 117 l = []
118 118 for t, n in self.tags().items():
119 119 try:
120 120 r = self.changelog.rev(n)
121 121 except:
122 122 r = -2 # sort to the beginning of the list if unknown
123 123 l.append((r, t, n))
124 124 l.sort()
125 125 return [(t, n) for r, t, n in l]
126 126
127 127 def nodetags(self, node):
128 128 '''return the tags associated with a node'''
129 129 if not self.nodetagscache:
130 130 self.nodetagscache = {}
131 131 for t, n in self.tags().items():
132 132 self.nodetagscache.setdefault(n, []).append(t)
133 133 return self.nodetagscache.get(node, [])
134 134
    def lookup(self, key):
        """Resolve a tag name or changelog identifier to a node.

        Tags take precedence over changelog lookup.  Raises repo.RepoError
        if the key cannot be resolved either way.
        """
        try:
            return self.tags()[key]
        except KeyError:
            try:
                return self.changelog.lookup(key)
            except:
                # deliberately broad: any lookup failure is reported
                # uniformly as an unknown revision
                raise repo.RepoError(_("unknown revision '%s'") % key)
143 143
144 144 def dev(self):
145 145 return os.stat(self.path).st_dev
146 146
147 147 def local(self):
148 148 return True
149 149
150 150 def join(self, f):
151 151 return os.path.join(self.path, f)
152 152
153 153 def wjoin(self, f):
154 154 return os.path.join(self.root, f)
155 155
156 156 def file(self, f):
157 157 if f[0] == '/':
158 158 f = f[1:]
159 159 return filelog.filelog(self.opener, f)
160 160
161 161 def getcwd(self):
162 162 return self.dirstate.getcwd()
163 163
164 164 def wfile(self, f, mode='r'):
165 165 return self.wopener(f, mode)
166 166
    def wread(self, filename):
        """Read filename from the working directory, applying any
        [encode] filter whose pattern matches the file's path."""
        if self.encodepats == None:
            # build (matcher, command) pairs from [encode] config, once
            l = []
            for pat, cmd in self.ui.configitems("encode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.encodepats = l

        data = self.wopener(filename, 'r').read()

        for mf, cmd in self.encodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                # only the first matching filter is applied
                break

        return data
184 184
    def wwrite(self, filename, data, fd=None):
        """Write data to filename in the working directory, applying any
        matching [decode] filter; writes to fd instead when given."""
        if self.decodepats == None:
            # build (matcher, command) pairs from [decode] config, once
            l = []
            for pat, cmd in self.ui.configitems("decode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.decodepats = l

        for mf, cmd in self.decodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                # only the first matching filter is applied
                break

        if fd:
            return fd.write(data)
        return self.wopener(filename, 'w').write(data)
202 202
    def transaction(self):
        """Return a transaction, nesting into one already in progress."""
        tr = self.transhandle
        if tr != None and tr.running():
            # join the transaction already in progress
            return tr.nest()

        # save dirstate for undo
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (fresh repository)
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        tr = transaction.transaction(self.ui.warn, self.opener,
                                     self.join("journal"),
                                     aftertrans(self.path))
        self.transhandle = tr
        return tr
220 220
    def recover(self):
        """Roll back an interrupted transaction; True if one was found."""
        l = self.lock()
        if os.path.exists(self.join("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.opener, self.join("journal"))
            # in-memory state is stale after the rollback
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
231 231
    def undo(self, wlock=None):
        """Roll back the last transaction, restoring the dirstate that
        was saved alongside it."""
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.join("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.opener, self.join("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            # refresh both store and working-dir state from disk
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no undo information available\n"))
244 244
245 245 def wreload(self):
246 246 self.dirstate.read()
247 247
248 248 def reload(self):
249 249 self.changelog.load()
250 250 self.manifest.load()
251 251 self.tagscache = None
252 252 self.nodetagscache = None
253 253
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None):
        """Acquire the named lock file, optionally waiting for it.

        releasefn/acquirefn are callbacks run on release/acquisition.
        Raises lock.LockHeld immediately when wait is false, util.Abort
        when the configured timeout expires.
        """
        try:
            l = lock.lock(self.join(lockname), 0, releasefn)
        except lock.LockHeld, inst:
            if not wait:
                raise inst
            self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
            try:
                # default to 600 seconds timeout
                l = lock.lock(self.join(lockname),
                              int(self.ui.config("ui", "timeout") or 600),
                              releasefn)
            except lock.LockHeld, inst:
                raise util.Abort(_("timeout while waiting for "
                                   "lock held by %s") % inst.args[0])
        if acquirefn:
            acquirefn()
        return l
272 272
273 273 def lock(self, wait=1):
274 274 return self.do_lock("lock", wait, acquirefn=self.reload)
275 275
276 276 def wlock(self, wait=1):
277 277 return self.do_lock("wlock", wait,
278 278 self.dirstate.write,
279 279 self.wreload)
280 280
    def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
        """Determine whether a new filenode is needed.

        Returns (existing_entry, None, None) when the text is unchanged
        from the effective parent, otherwise (None, fp1, fp2) giving the
        parent filenodes for a new revision.
        """
        fp1 = manifest1.get(filename, nullid)
        fp2 = manifest2.get(filename, nullid)

        if fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = filelog.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and text == filelog.read(fp1):
            return (fp1, None, None)

        return (None, fp1, fp2)
299 299
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        """Create a changeset directly from the given file list.

        Files are taken as-is from the working directory, with no
        dirstate status checks.  p1/p2 default to the dirstate parents;
        the dirstate is only updated when committing onto its first
        parent.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        # only touch the dirstate when committing onto its first parent
        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    mm[f] = entry
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # file is gone from the working dir: treat as a removal
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
355 355
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None):
        """Commit changes to the repository.

        If files is given, only those files are considered (their
        dirstate status decides commit vs. remove); otherwise all
        changes matching match are committed.  An empty text launches
        the user's editor.  Returns the new changeset node, or None if
        nothing was committed.
        """
        commit = []
        remove = []
        changed = []

        if files:
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            modified, added, removed, deleted, unknown = self.changes(match=match)
            commit = modified + added
            remove = removed

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        # a merge (p2 set) is always committed, even with no file changes
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status(_("nothing changed\n"))
            return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wread(f)
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

            r = self.file(f)

            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                # record copy source and its revision in the file metadata
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
                fp1, fp2 = nullid, nullid
            else:
                entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # file unchanged: reuse the existing filenode
                    new[f] = entry
                    continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1 = m1.copy()
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        # resolve the user before editing so the editor (hgeditor) sees
        # the correct $HGUSER, e.g. when "commit -u" was given
        user = user or self.ui.username()
        if not text:
            edittext = [""]
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            edittext = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)
            # an empty commit message aborts the commit
            if not edittext.rstrip():
                return None
            text = edittext

        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
475 475
    def walk(self, node=None, files=[], match=util.always):
        """Yield (source, filename) pairs for files accepted by match.

        With node, walk that revision's manifest (source 'm') and warn
        about requested files absent from it; otherwise walk the
        dirstate/working directory.
        """
        if node:
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                # pop so that what remains in fdict is the set of
                # requested files not present in this revision
                fdict.pop(fn, None)
                if match(fn):
                    yield 'm', fn
            for fn in fdict:
                self.ui.warn(_('%s: No such file in rev %s\n') % (
                    util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match):
                yield src, fn
489 489
    def changes(self, node1=None, node2=None, files=[], match=util.always,
                wlock=None):
        """return changes between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns (modified, added, removed, deleted, unknown), each a
        sorted list of file names.
        """

        def fcmp(fn, mf):
            # compare working-dir contents against the manifest revision
            t1 = self.wread(fn)
            t2 = self.file(fn).read(mf.get(fn, nullid))
            return cmp(t1, t2)

        def mfmatches(node):
            # manifest of node, restricted to files accepted by match
            change = self.changelog.read(node)
            mf = dict(self.manifest.read(change[0]))
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if node1:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    # best effort: carry on without the lock, but then we
                    # must not write the dirstate below
                    wlock = None
            lookup, modified, added, removed, deleted, unknown = (
                self.dirstate.changes(files, match))

            # are we comparing working dir against its parent?
            if not node1:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        elif wlock is not None:
                            # file is clean: remember that in the dirstate
                            self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            deleted, unknown = [], []
            mf2 = mfmatches(node2)

        if node1:
            # flush lists from dirstate before comparing manifests
            modified, added = [], []

            for fn in mf2:
                if mf1.has_key(fn):
                    if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
                        modified.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # everything left in mf1 is missing from mf2
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown:
            l.sort()
        return (modified, added, removed, deleted, unknown)
569 569
    def add(self, list, wlock=None):
        """Schedule the given files for addition at the next commit."""
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if not os.path.exists(p):
                self.ui.warn(_("%s does not exist!\n") % f)
            elif not os.path.isfile(p):
                self.ui.warn(_("%s not added: only files supported currently\n")
                             % f)
            elif self.dirstate.state(f) in 'an':
                self.ui.warn(_("%s already tracked!\n") % f)
            else:
                self.dirstate.update([f], "a")
584 584
585 585 def forget(self, list, wlock=None):
586 586 if not wlock:
587 587 wlock = self.wlock()
588 588 for f in list:
589 589 if self.dirstate.state(f) not in 'ai':
590 590 self.ui.warn(_("%s not added!\n") % f)
591 591 else:
592 592 self.dirstate.forget([f])
593 593
    def remove(self, list, unlink=False, wlock=None):
        """Schedule files for removal; with unlink, delete them from disk."""
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # a file that is already gone is fine
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                # never committed: simply drop the pending add
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")
614 614
    def undelete(self, list, wlock=None):
        """Restore removed files from the first dirstate parent."""
        p = self.dirstate.parents()[0]
        mn = self.changelog.read(p)[0]
        mf = self.manifest.readflags(mn)
        m = self.manifest.read(mn)
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in "r":
                self.ui.warn("%s not removed!\n" % f)
            else:
                # restore content and exec bit from the parent revision
                t = self.file(f).read(m[f])
                self.wwrite(f, t)
                util.set_exec(self.wjoin(f), mf[f])
                self.dirstate.update([f], "n")
630 630
    def copy(self, source, dest, wlock=None):
        """Record dest as a copy of source in the dirstate."""
        p = self.wjoin(dest)
        if not os.path.exists(p):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not os.path.isfile(p):
            self.ui.warn(_("copy failed: %s is not a file\n") % dest)
        else:
            if not wlock:
                wlock = self.wlock()
            if self.dirstate.state(dest) == '?':
                # the copy target must itself be tracked
                self.dirstate.update([dest], "a")
            self.dirstate.copy(source, dest)
643 643
644 644 def heads(self, start=None):
645 645 heads = self.changelog.heads(start)
646 646 # sort the output in rev descending order
647 647 heads = [(-self.changelog.rev(h), h) for h in heads]
648 648 heads.sort()
649 649 return [n for (r, n) in heads]
650 650
    # branchlookup returns a dict giving a list of branches for
    # each head.  A branch is defined as the tag of a node or
    # the branch of the node's parents.  If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph:  a->b->c->d->e
    #                       \         /
    #                        aa -----/
    # a has tag 2.6.12
    # d has tag 2.6.13
    # e would have branch tags for 2.6.12 and 2.6.13.  Because the node
    # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
    # from the list.
    #
    # It is possible that more than one head will have the same branch tag.
    # callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (ie checkout of a specific
    # branch).
    #
    # passing in a specific branch will limit the depth of the search
    # through the parents.  It won't limit the branches returned in the
    # result though.
    def branchlookup(self, heads=None, branch=None):
        """Map each head to the list of branch tags visible from it."""
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head.  The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume a deferred second-parent traversal
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        # depth limit: stop once the requested branch is hit
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    # defer the second parent of a merge for a later pass
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # transitive closure of nodes reachable via branches,
                # memoized across heads in viscache
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited.  This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
756 756
    def branches(self, nodes):
        """Describe the linear segment ending at each given node.

        Returns (head, root, parent1, parent2) tuples, following first
        parents back until a merge or the repository root is reached.
        """
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while n:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    # merge or root terminates the linear segment
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
770 770
    def between(self, pairs):
        """For each (top, bottom) pair, sample nodes along the
        first-parent chain at exponentially growing distances
        (1, 2, 4, ...), used by the discovery binary search."""
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    # record every node at distance 1, 2, 4, 8, ...
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
789 789
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return the roots of changesets the remote has that we lack.

        base (a dict used as a set) is filled in with nodes known to be
        common to both repositories.  heads defaults to remote.heads().
        Raises util.Abort when the repositories are unrelated, unless
        force is set.
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        if not heads:
            heads = remote.heads()

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            # every remote head is already known locally
            return []

        rep = {}
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid:
                    break
                if n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                if n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            base[n[2]] = 1 # latest known
                            continue

                    # queue both parents for the next batched request
                    for a in n[2:4]:
                        if a not in rep:
                            r.append(a)
                            rep[a] = 1

                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                # query the remote in batches of ten
                for p in range(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        if b[0] in m:
                            self.ui.debug(_("found base node %s\n")
                                          % short(b[0]))
                            base[b[0]] = 1
                        elif b[0] not in seen:
                            unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            # the only common node is null: the repos share no history
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.note(_("found new changesets starting at ") +
                     " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
914 914
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return the roots of local changesets the remote lacks.

        If base is None, the set of common nodes is computed via
        findincoming; otherwise base is assumed to already hold them.
        """
        if base == None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)

        # this is the set of all roots we have to push
        return subset
944 944
    def pull(self, remote, heads=None, force=False):
        """Pull changesets from remote.

        Returns addchangegroup's result, or 1 when nothing was pulled.
        """
        l = self.lock()

        # if we have an empty repo, fetch everything
        if self.changelog.tip() == nullid:
            self.ui.status(_("requesting all changes\n"))
            fetch = [nullid]
        else:
            fetch = self.findincoming(remote, force=force)

        if not fetch:
            self.ui.status(_("no changes found\n"))
            return 1

        if heads is None:
            cg = remote.changegroup(fetch, 'pull')
        else:
            # restrict the pull to the requested heads
            cg = remote.changegroupsubset(fetch, heads, 'pull')
        return self.addchangegroup(cg)
964 964
    def push(self, remote, force=False, revs=None):
        """Push local changesets (optionally limited to revs) to remote.

        Returns remote.addchangegroup's result, or 1 when the push was
        refused or there was nothing to push.
        """
        lock = remote.lock()

        base = {}
        heads = remote.heads()
        inc = self.findincoming(remote, base, heads, force=force)
        if not force and inc:
            # pushing now could create divergence with the remote's
            # unsynced changes
            self.ui.warn(_("abort: unsynced remote changes!\n"))
            self.ui.status(_("(did you forget to sync? use push -f to force)\n"))
            return 1

        update = self.findoutgoing(remote, base)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return 1
        elif not force:
            if len(bases) < len(heads):
                # more heads than roots: the push would add remote heads
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return 1

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return remote.addchangegroup(cg)
997 997
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        bases:  changelog nodes assumed already present on the recipient
        heads:  changelog nodes the generated group must reach
        source: tag passed through to the 'preoutgoing'/'outgoing' hooks

        Returns a util.chunkbuffer streaming the chunks produced by the
        inner gengroup() generator.
        """

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed
            # to know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1267 1267
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        basenodes: changelog nodes assumed already present on the recipient
        source:    tag passed through to the 'preoutgoing'/'outgoing' hooks

        Returns a util.chunkbuffer streaming the serialized changegroup.
        """

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # every changeset we will send, in topological order
        nodes = cl.nodesbetween(basenodes, None)[0]
        # ersatz set of the corresponding changelog revision numbers
        revset = dict.fromkeys([cl.rev(n) for n in nodes])

        # A changeset's owning changenode is itself.
        def identity(x):
            return x

        # Yield the nodes of *revlog* whose linked changeset is outgoing,
        # in revision order.
        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        # Record, as a side effect of the changelog group walk, every file
        # touched by an outgoing changeset.
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # Map a manifest/file node back to its owning changelog node.
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    # a file group is only emitted when it is non-empty
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1331 1331
    def addchangegroup(self, source):
        """Add the changegroup read from *source* to this repository.

        source is a stream consumed through changegroup.chunkiter/getchunk.
        Fires the 'prechangegroup', 'pretxnchangegroup', 'changegroup' and
        'incoming' hooks along the way, and wraps all writes in a single
        transaction.
        """

        # report each incoming changeset; the return value is the link
        # revision addgroup() will assign to the next node
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return self.changelog.count()

        # link revision for manifest/file groups: the revision of the
        # owning changelog node
        def revmap(x):
            return self.changelog.rev(x)

        if not source:
            return

        self.hook('prechangegroup', throw=True)

        changesets = files = revisions = 0

        tr = self.transaction()

        # remember the head count so we can report newly created heads
        oldheads = len(self.changelog.heads())

        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        co = self.changelog.tip()
        chunkiter = changegroup.chunkiter(source)
        cn = self.changelog.addgroup(chunkiter, csmap, tr, 1) # unique
        cnr, cor = map(self.changelog.rev, (cn, co))
        if cn == nullid:
            # empty group: pretend nothing was added
            cnr = cor
        changesets = cnr - cor

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        mm = self.manifest.tip()
        chunkiter = changegroup.chunkiter(source)
        mo = self.manifest.addgroup(chunkiter, revmap, tr)

        # process the files
        self.ui.status(_("adding file changes\n"))
        while 1:
            # each file group is preceded by a chunk holding its name;
            # an empty chunk terminates the stream
            f = changegroup.getchunk(source)
            if not f:
                break
            self.ui.debug(_("adding %s revisions\n") % f)
            fl = self.file(f)
            o = fl.count()
            chunkiter = changegroup.chunkiter(source)
            n = fl.addgroup(chunkiter, revmap, tr)
            revisions += fl.count() - o
            files += 1

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads > oldheads:
            heads = _(" (+%d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        # give hooks a chance to veto before the transaction commits
        self.hook('pretxnchangegroup', throw=True,
                  node=hex(self.changelog.node(cor+1)))

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)))

            for i in range(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)))
1401 1401
    def update(self, node, allow=False, force=False, choose=None,
               moddirstate=True, forcemerge=False, wlock=None):
        """Update the working directory to changeset *node*.

        allow:       permit a branch merge (the 'update -m' case)
        force:       clobber local changes / unknown files ('update -C')
        choose:      optional predicate limiting which files are touched
        moddirstate: if False, leave the dirstate untouched
        forcemerge:  merge even with outstanding local changes
        wlock:       working-dir lock to reuse; acquired here when needed

        Returns a true value on error (failed file merges or aborted
        branch-spanning update), a false value on success.
        """
        pl = self.dirstate.parents()
        if not force and pl[1] != nullid:
            self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
            return 1

        err = False

        p1, p2 = pl[0], node
        pa = self.changelog.ancestor(p1, p2)
        m1n = self.changelog.read(p1)[0]
        m2n = self.changelog.read(p2)[0]
        man = self.manifest.ancestor(m1n, m2n)
        m1 = self.manifest.read(m1n)
        mf1 = self.manifest.readflags(m1n)
        m2 = self.manifest.read(m2n).copy()
        mf2 = self.manifest.readflags(m2n)
        ma = self.manifest.read(man)
        mfa = self.manifest.readflags(man)

        modified, added, removed, deleted, unknown = self.changes()

        # is this a jump, or a merge?  i.e. is there a linear path
        # from p1 to p2?
        linear_path = (pa == p1 or pa == p2)

        if allow and linear_path:
            raise util.Abort(_("there is nothing to merge, "
                               "just use 'hg update'"))
        if allow and not forcemerge:
            if modified or added or removed:
                raise util.Abort(_("outstanding uncommited changes"))
        if not forcemerge and not force:
            # refuse to clobber an unknown working-dir file that differs
            # from the incoming version
            for f in unknown:
                if f in m2:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) != 0:
                        raise util.Abort(_("'%s' already exists in the working"
                                           " dir and differs from remote") % f)

        # resolve the manifest to determine which files
        # we care about merging
        self.ui.note(_("resolving manifests\n"))
        self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
                      (force, allow, moddirstate, linear_path))
        self.ui.debug(_(" ancestor %s local %s remote %s\n") %
                      (short(man), short(m1n), short(m2n)))

        merge = {}   # files needing a 3-way merge: f -> (my, other, mode)
        get = {}     # files to fetch from the target rev: f -> node
        remove = []  # files to delete from the working dir

        # construct a working dir manifest
        mw = m1.copy()
        mfw = mf1.copy()
        umap = dict.fromkeys(unknown)

        for f in added + modified + unknown:
            mw[f] = ""
            mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

        if moddirstate and not wlock:
            wlock = self.wlock()

        for f in deleted + removed:
            if f in mw:
                del mw[f]

            # If we're jumping between revisions (as opposed to merging),
            # and if neither the working directory nor the target rev has
            # the file, then we need to remove it from the dirstate, to
            # prevent the dirstate from listing the file when it is no
            # longer in the manifest.
            if moddirstate and linear_path and f not in m2:
                self.dirstate.forget((f,))

        # Compare manifests
        for f, n in mw.iteritems():
            if choose and not choose(f):
                continue
            if f in m2:
                s = 0

                # is the wfile new since m1, and match m2?
                if f not in m1:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) == 0:
                        n = m2[f]
                    del t1, t2

                # are files different?
                if n != m2[f]:
                    a = ma.get(f, nullid)
                    # are both different from the ancestor?
                    if n != a and m2[f] != a:
                        self.ui.debug(_(" %s versions differ, resolve\n") % f)
                        # merge executable bits
                        # "if we changed or they changed, change in merge"
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        merge[f] = (m1.get(f, nullid), m2[f], mode)
                        s = 1
                    # are we clobbering?
                    # is remote's version newer?
                    # or are we going back in time?
                    elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                        self.ui.debug(_(" remote %s is newer, get\n") % f)
                        get[f] = m2[f]
                        s = 1
                elif f in umap:
                    # this unknown file is the same as the checkout
                    get[f] = m2[f]

                if not s and mfw[f] != mf2[f]:
                    if force:
                        self.ui.debug(_(" updating permissions for %s\n") % f)
                        util.set_exec(self.wjoin(f), mf2[f])
                    else:
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        if mode != b:
                            self.ui.debug(_(" updating permissions for %s\n")
                                          % f)
                            util.set_exec(self.wjoin(f), mode)
                del m2[f]
            elif f in ma:
                if n != ma[f]:
                    # locally changed, remotely deleted: ask (default delete)
                    r = _("d")
                    if not force and (linear_path or allow):
                        r = self.ui.prompt(
                            (_(" local changed %s which remote deleted\n") % f) +
                            _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                    if r == _("d"):
                        remove.append(f)
                else:
                    self.ui.debug(_("other deleted %s\n") % f)
                    remove.append(f) # other deleted it
            else:
                # file is created on branch or in working directory
                if force and f not in umap:
                    self.ui.debug(_("remote deleted %s, clobbering\n") % f)
                    remove.append(f)
                elif n == m1.get(f, nullid): # same as parent
                    if p2 == pa: # going backwards?
                        self.ui.debug(_("remote deleted %s\n") % f)
                        remove.append(f)
                    else:
                        self.ui.debug(_("local modified %s, keeping\n") % f)
                else:
                    self.ui.debug(_("working dir created %s, keeping\n") % f)

        # files remaining in m2 exist only in the target revision
        for f, n in m2.iteritems():
            if choose and not choose(f):
                continue
            if f[0] == "/":
                continue
            if f in ma and n != ma[f]:
                # remotely changed, locally deleted: ask (default keep)
                r = _("k")
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        (_("remote changed %s which local deleted\n") % f) +
                        _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                if r == _("k"):
                    get[f] = n
            elif f not in ma:
                self.ui.debug(_("remote created %s\n") % f)
                get[f] = n
            else:
                if force or p2 == pa: # going backwards?
                    self.ui.debug(_("local deleted %s, recreating\n") % f)
                    get[f] = n
                else:
                    self.ui.debug(_("local deleted %s\n") % f)

        del mw, m1, m2, ma

        if force:
            # forced update: fetch everything rather than merging
            for f in merge:
                get[f] = merge[f][1]
            merge = {}

        if linear_path or force:
            # we don't need to do any magic, just jump to the new rev
            branch_merge = False
            p1, p2 = p2, nullid
        else:
            if not allow:
                self.ui.status(_("this update spans a branch"
                                 " affecting the following files:\n"))
                fl = merge.keys() + get.keys()
                fl.sort()
                for f in fl:
                    cf = ""
                    if f in merge:
                        cf = _(" (resolve)")
                    self.ui.status(" %s%s\n" % (f, cf))
                self.ui.warn(_("aborting update spanning branches!\n"))
                self.ui.status(_("(use update -m to merge across branches"
                                 " or -C to lose changes)\n"))
                return 1
            branch_merge = True

        # get the files we don't need to change
        files = get.keys()
        files.sort()
        for f in files:
            if f[0] == "/":
                continue
            self.ui.note(_("getting %s\n") % f)
            t = self.file(f).read(get[f])
            self.wwrite(f, t)
            util.set_exec(self.wjoin(f), mf2[f])
            if moddirstate:
                if branch_merge:
                    self.dirstate.update([f], 'n', st_mtime=-1)
                else:
                    self.dirstate.update([f], 'n')

        # merge the tricky bits
        failedmerge = []
        files = merge.keys()
        files.sort()
        xp1 = hex(p1)
        xp2 = hex(p2)
        for f in files:
            self.ui.status(_("merging %s\n") % f)
            my, other, flag = merge[f]
            ret = self.merge3(f, my, other, xp1, xp2)
            if ret:
                err = True
                failedmerge.append(f)
            util.set_exec(self.wjoin(f), flag)
            if moddirstate:
                if branch_merge:
                    # We've done a branch merge, mark this file as merged
                    # so that we properly record the merger later
                    self.dirstate.update([f], 'm')
                else:
                    # We've update-merged a locally modified file, so
                    # we set the dirstate to emulate a normal checkout
                    # of that file some time in the past. Thus our
                    # merge will appear as a normal local file
                    # modification.
                    f_len = len(self.file(f).read(other))
                    self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)

        remove.sort()
        for f in remove:
            self.ui.note(_("removing %s\n") % f)
            util.audit_path(f)
            try:
                util.unlink(self.wjoin(f))
            except OSError, inst:
                # a file already gone is fine; anything else is reported
                if inst.errno != errno.ENOENT:
                    self.ui.warn(_("update failed to remove %s: %s!\n") %
                                 (f, inst.strerror))
        if moddirstate:
            if branch_merge:
                self.dirstate.update(remove, 'r')
            else:
                self.dirstate.forget(remove)

        if moddirstate:
            self.dirstate.setparents(p1, p2)

        stat = ((len(get), _("updated")),
                (len(merge) - len(failedmerge), _("merged")),
                (len(remove), _("removed")),
                (len(failedmerge), _("unresolved")))
        note = ", ".join([_("%d files %s") % s for s in stat])
        self.ui.note("%s\n" % note)
        if moddirstate and branch_merge:
            self.ui.note(_("(branch merge, don't forget to commit)\n"))

        return err
1680 1680
    def merge3(self, fn, my, other, p1, p2):
        """perform a 3-way merge in the working directory

        fn:       repo-relative path of the file being merged
        my/other: filelog nodes of the local and remote versions
        p1/p2:    hex changelog parents, exported to the merge tool

        The merge tool (HGMERGE, ui.merge, or "hgmerge") is run on the
        working copy plus two temporary files; returns its exit status
        (non-zero means the merge failed).
        """

        # write revision *node* of the file to a temp file and return its name
        def temp(prefix, node):
            pre = "%s~%s." % (os.path.basename(fn), prefix)
            (fd, name) = tempfile.mkstemp("", pre)
            f = os.fdopen(fd, "wb")
            self.wwrite(fn, fl.read(node), f)
            f.close()
            return name

        fl = self.file(fn)
        base = fl.ancestor(my, other)
        a = self.wjoin(fn)
        b = temp("base", base)
        c = temp("other", other)

        self.ui.note(_("resolving %s\n") % fn)
        self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
                      (fn, short(my), short(other), short(base)))

        cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
               or "hgmerge")
        r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
                        environ={'HG_FILE': fn,
                                 'HG_MY_NODE': p1,
                                 'HG_OTHER_NODE': p2,
                                 'HG_FILE_MY_NODE': hex(my),
                                 'HG_FILE_OTHER_NODE': hex(other),
                                 'HG_FILE_BASE_NODE': hex(base)})
        if r:
            self.ui.warn(_("merging %s failed!\n") % fn)

        # clean up the temporary base/other copies
        os.unlink(b)
        os.unlink(c)
        return r
1717 1717
1718 1718 def verify(self):
1719 1719 filelinkrevs = {}
1720 1720 filenodes = {}
1721 1721 changesets = revisions = files = 0
1722 1722 errors = [0]
1723 1723 neededmanifests = {}
1724 1724
1725 1725 def err(msg):
1726 1726 self.ui.warn(msg + "\n")
1727 1727 errors[0] += 1
1728 1728
1729 1729 def checksize(obj, name):
1730 1730 d = obj.checksize()
1731 1731 if d[0]:
1732 1732 err(_("%s data length off by %d bytes") % (name, d[0]))
1733 1733 if d[1]:
1734 1734 err(_("%s index contains %d extra bytes") % (name, d[1]))
1735 1735
1736 1736 seen = {}
1737 1737 self.ui.status(_("checking changesets\n"))
1738 1738 checksize(self.changelog, "changelog")
1739 1739
1740 1740 for i in range(self.changelog.count()):
1741 1741 changesets += 1
1742 1742 n = self.changelog.node(i)
1743 1743 l = self.changelog.linkrev(n)
1744 1744 if l != i:
1745 1745 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1746 1746 if n in seen:
1747 1747 err(_("duplicate changeset at revision %d") % i)
1748 1748 seen[n] = 1
1749 1749
1750 1750 for p in self.changelog.parents(n):
1751 1751 if p not in self.changelog.nodemap:
1752 1752 err(_("changeset %s has unknown parent %s") %
1753 1753 (short(n), short(p)))
1754 1754 try:
1755 1755 changes = self.changelog.read(n)
1756 1756 except KeyboardInterrupt:
1757 1757 self.ui.warn(_("interrupted"))
1758 1758 raise
1759 1759 except Exception, inst:
1760 1760 err(_("unpacking changeset %s: %s") % (short(n), inst))
1761 1761 continue
1762 1762
1763 1763 neededmanifests[changes[0]] = n
1764 1764
1765 1765 for f in changes[3]:
1766 1766 filelinkrevs.setdefault(f, []).append(i)
1767 1767
1768 1768 seen = {}
1769 1769 self.ui.status(_("checking manifests\n"))
1770 1770 checksize(self.manifest, "manifest")
1771 1771
1772 1772 for i in range(self.manifest.count()):
1773 1773 n = self.manifest.node(i)
1774 1774 l = self.manifest.linkrev(n)
1775 1775
1776 1776 if l < 0 or l >= self.changelog.count():
1777 1777 err(_("bad manifest link (%d) at revision %d") % (l, i))
1778 1778
1779 1779 if n in neededmanifests:
1780 1780 del neededmanifests[n]
1781 1781
1782 1782 if n in seen:
1783 1783 err(_("duplicate manifest at revision %d") % i)
1784 1784
1785 1785 seen[n] = 1
1786 1786
1787 1787 for p in self.manifest.parents(n):
1788 1788 if p not in self.manifest.nodemap:
1789 1789 err(_("manifest %s has unknown parent %s") %
1790 1790 (short(n), short(p)))
1791 1791
1792 1792 try:
1793 1793 delta = mdiff.patchtext(self.manifest.delta(n))
1794 1794 except KeyboardInterrupt:
1795 1795 self.ui.warn(_("interrupted"))
1796 1796 raise
1797 1797 except Exception, inst:
1798 1798 err(_("unpacking manifest %s: %s") % (short(n), inst))
1799 1799 continue
1800 1800
1801 1801 try:
1802 1802 ff = [ l.split('\0') for l in delta.splitlines() ]
1803 1803 for f, fn in ff:
1804 1804 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1805 1805 except (ValueError, TypeError), inst:
1806 1806 err(_("broken delta in manifest %s: %s") % (short(n), inst))
1807 1807
1808 1808 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1809 1809
1810 1810 for m, c in neededmanifests.items():
1811 1811 err(_("Changeset %s refers to unknown manifest %s") %
1812 1812 (short(m), short(c)))
1813 1813 del neededmanifests
1814 1814
1815 1815 for f in filenodes:
1816 1816 if f not in filelinkrevs:
1817 1817 err(_("file %s in manifest but not in changesets") % f)
1818 1818
1819 1819 for f in filelinkrevs:
1820 1820 if f not in filenodes:
1821 1821 err(_("file %s in changeset but not in manifest") % f)
1822 1822
1823 1823 self.ui.status(_("checking files\n"))
1824 1824 ff = filenodes.keys()
1825 1825 ff.sort()
1826 1826 for f in ff:
1827 1827 if f == "/dev/null":
1828 1828 continue
1829 1829 files += 1
1830 1830 if not f:
1831 1831 err(_("file without name in manifest %s") % short(n))
1832 1832 continue
1833 1833 fl = self.file(f)
1834 1834 checksize(fl, f)
1835 1835
1836 1836 nodes = {nullid: 1}
1837 1837 seen = {}
1838 1838 for i in range(fl.count()):
1839 1839 revisions += 1
1840 1840 n = fl.node(i)
1841 1841
1842 1842 if n in seen:
1843 1843 err(_("%s: duplicate revision %d") % (f, i))
1844 1844 if n not in filenodes[f]:
1845 1845 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1846 1846 else:
1847 1847 del filenodes[f][n]
1848 1848
1849 1849 flr = fl.linkrev(n)
1850 1850 if flr not in filelinkrevs.get(f, []):
1851 1851 err(_("%s:%s points to unexpected changeset %d")
1852 1852 % (f, short(n), flr))
1853 1853 else:
1854 1854 filelinkrevs[f].remove(flr)
1855 1855
1856 1856 # verify contents
1857 1857 try:
1858 1858 t = fl.read(n)
1859 1859 except KeyboardInterrupt:
1860 1860 self.ui.warn(_("interrupted"))
1861 1861 raise
1862 1862 except Exception, inst:
1863 1863 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1864 1864
1865 1865 # verify parents
1866 1866 (p1, p2) = fl.parents(n)
1867 1867 if p1 not in nodes:
1868 1868 err(_("file %s:%s unknown parent 1 %s") %
1869 1869 (f, short(n), short(p1)))
1870 1870 if p2 not in nodes:
1871 1871 err(_("file %s:%s unknown parent 2 %s") %
1872 1872 (f, short(n), short(p1)))
1873 1873 nodes[n] = 1
1874 1874
1875 1875 # cross-check
1876 1876 for node in filenodes[f]:
1877 1877 err(_("node %s in manifests not in %s") % (hex(node), f))
1878 1878
1879 1879 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1880 1880 (files, changesets, revisions))
1881 1881
1882 1882 if errors[0]:
1883 1883 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1884 1884 return 1
1885 1885
# used to avoid circular references so destructors work
def aftertrans(base):
    """Return a callback that moves the transaction journal files under
    *base* into place as the corresponding undo files."""
    path = base
    def renamefiles():
        for src, dst in (("journal", "undo"),
                         ("journal.dirstate", "undo.dirstate")):
            util.rename(os.path.join(path, src), os.path.join(path, dst))
    return renamefiles
1894 1894
@@ -1,218 +1,217 b''
1 1 # ui.py - user interface bits for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import ConfigParser
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 demandload(globals(), "os re socket sys util")
12 12
# NOTE(review): diff-view rendering -- the leading "old new" integer pairs on
# each line are changeset line numbers from the display, not program text.
# ui: Mercurial's user-interface object.  Wraps configuration lookup
# (ConfigParser over hgrc files plus an in-memory `overlay` of runtime
# overrides), verbosity-gated output helpers, and interactive prompting.
# Instances form a parent/child chain: a child delegates unknown attributes
# and missing config keys to `parentui`.
13 13 class ui(object):
# Build either the root ui (parentui is None: read rc files, derive the
# quiet/verbose/debug/interactive flags from config) or a child ui that
# copies its parent's parsed config so ConfigParser interpolation works.
14 14 def __init__(self, verbose=False, debug=False, quiet=False,
15 15 interactive=True, parentui=None):
16 16 self.overlay = {}
17 17 if parentui is None:
18 18 # this is the parent of all ui children
19 19 self.parentui = None
20 20 self.cdata = ConfigParser.SafeConfigParser()
21 21 self.readconfig(util.rcpath())
22 22
23 23 self.quiet = self.configbool("ui", "quiet")
24 24 self.verbose = self.configbool("ui", "verbose")
25 25 self.debugflag = self.configbool("ui", "debug")
26 26 self.interactive = self.configbool("ui", "interactive", True)
27 27
28 28 self.updateopts(verbose, debug, quiet, interactive)
29 29 self.diffcache = None
30 30 else:
31 31 # parentui may point to an ui object which is already a child
32 32 self.parentui = parentui.parentui or parentui
33 33 parent_cdata = self.parentui.cdata
34 34 self.cdata = ConfigParser.SafeConfigParser(parent_cdata.defaults())
35 35 # make interpolation work
36 36 for section in parent_cdata.sections():
37 37 self.cdata.add_section(section)
38 38 for name, value in parent_cdata.items(section, raw=True):
39 39 self.cdata.set(section, name, value)
40 40
# Any attribute not set on a child ui is fetched from the parent (this is
# only reached for names missing on the instance, so a root ui with
# parentui=None never recurses here for its own attributes).
41 41 def __getattr__(self, key):
42 42 return getattr(self.parentui, key)
43 43
# Fold command-line flags into the existing flags: debug implies verbose
# and suppresses quiet; interactive can only be switched off, not on.
44 44 def updateopts(self, verbose=False, debug=False, quiet=False,
45 45 interactive=True):
46 46 self.quiet = (self.quiet or quiet) and not verbose and not debug
47 47 self.verbose = (self.verbose or verbose) or debug
48 48 self.debugflag = (self.debugflag or debug)
49 49 self.interactive = (self.interactive and interactive)
50 50
# Read one rc file or a list of them into self.cdata; parse errors abort.
# Afterwards every relative entry in [paths] is rebased onto `root`
# (defaulting to the user's home directory) so later lookups are absolute.
51 51 def readconfig(self, fn, root=None):
52 52 if isinstance(fn, basestring):
53 53 fn = [fn]
54 54 for f in fn:
55 55 try:
56 56 self.cdata.read(f)
57 57 except ConfigParser.ParsingError, inst:
58 58 raise util.Abort(_("Failed to parse %s\n%s") % (f, inst))
59 59 # translate paths relative to root (or home) into absolute paths
60 60 if root is None:
61 61 root = os.path.expanduser('~')
62 62 for name, path in self.configitems("paths"):
63 63 if path and path.find("://") == -1 and not os.path.isabs(path):
64 64 self.cdata.set("paths", name, os.path.join(root, path))
65 65
# Runtime override: stored only in the overlay dict, never written to disk,
# and consulted before the parsed config by config()/configbool().
66 66 def setconfig(self, section, name, val):
67 67 self.overlay[(section, name)] = val
68 68
# Lookup order: overlay -> this ui's parsed config -> parent ui -> default.
69 69 def config(self, section, name, default=None):
70 70 if self.overlay.has_key((section, name)):
71 71 return self.overlay[(section, name)]
72 72 if self.cdata.has_option(section, name):
73 73 try:
74 74 return self.cdata.get(section, name)
75 75 except ConfigParser.InterpolationError, inst:
76 76 raise util.Abort(_("Error in configuration:\n%s") % inst)
77 77 if self.parentui is None:
78 78 return default
79 79 else:
80 80 return self.parentui.config(section, name, default)
81 81
# Boolean variant of config(); same overlay -> local -> parent -> default
# chain, using ConfigParser's boolean coercion for the parsed value.
82 82 def configbool(self, section, name, default=False):
83 83 if self.overlay.has_key((section, name)):
84 84 return self.overlay[(section, name)]
85 85 if self.cdata.has_option(section, name):
86 86 try:
87 87 return self.cdata.getboolean(section, name)
88 88 except ConfigParser.InterpolationError, inst:
89 89 raise util.Abort(_("Error in configuration:\n%s") % inst)
90 90 if self.parentui is None:
91 91 return default
92 92 else:
93 93 return self.parentui.configbool(section, name, default)
94 94
# All (name, value) pairs of a section as a sorted list; local entries
# override the parent's because they are merged in last.
# NOTE(review): the overlay is NOT consulted here, unlike config() --
# presumably intentional, but worth confirming against callers.
95 95 def configitems(self, section):
96 96 items = {}
97 97 if self.parentui is not None:
98 98 items = dict(self.parentui.configitems(section))
99 99 if self.cdata.has_section(section):
100 100 try:
101 101 items.update(dict(self.cdata.items(section)))
102 102 except ConfigParser.InterpolationError, inst:
103 103 raise util.Abort(_("Error in configuration:\n%s") % inst)
104 104 x = items.items()
105 105 x.sort()
106 106 return x
107 107
# Generator over every (section, name, value) visible from this ui,
# overlay first, then parsed config, then the parent chain; `seen` keeps
# shadowed entries from being yielded twice.  Newlines in values are
# escaped so each triple prints on one line.
108 108 def walkconfig(self, seen=None):
109 109 if seen is None:
110 110 seen = {}
111 111 for (section, name), value in self.overlay.iteritems():
112 112 yield section, name, value
113 113 seen[section, name] = 1
114 114 for section in self.cdata.sections():
115 115 for name, value in self.cdata.items(section):
116 116 if (section, name) in seen: continue
117 117 yield section, name, value.replace('\n', '\\n')
118 118 seen[section, name] = 1
119 119 if self.parentui is not None:
120 120 for parent in self.parentui.walkconfig(seen):
121 121 yield parent
122 122
# Configured extensions, i.e. the [extensions] section as (name, value) pairs.
123 123 def extensions(self):
124 124 return self.configitems("extensions")
125 125
# Diff display options from the [diff] section, coerced to booleans and
# cached on first use in self.diffcache.  Any value other than the string
# 'true' (case-insensitive) becomes False.
126 126 def diffopts(self):
127 127 if self.diffcache:
128 128 return self.diffcache
129 129 ret = { 'showfunc' : True, 'ignorews' : False}
130 130 for x in self.configitems("diff"):
131 131 k = x[0].lower()
132 132 v = x[1]
133 133 if v:
134 134 v = v.lower()
135 135 if v == 'true':
136 136 value = True
137 137 else:
138 138 value = False
139 139 ret[k] = value
140 140 self.diffcache = ret
141 141 return ret
142 142
# Committer identity, first match wins: $HGUSER, then ui.username from the
# config, then $EMAIL, then LOGNAME-or-USERNAME @ the local FQDN.
143 143 def username(self):
144 144 return (os.environ.get("HGUSER") or
145 145 self.config("ui", "username") or
146 146 os.environ.get("EMAIL") or
147 147 (os.environ.get("LOGNAME",
148 148 os.environ.get("USERNAME", "unknown"))
149 149 + '@' + socket.getfqdn()))
150 150
151 151 def shortuser(self, user):
152 152 """Return a short representation of a user name or email address."""
# Only shortened in non-verbose mode; verbose output keeps the full name.
153 153 if not self.verbose: user = util.shortuser(user)
154 154 return user
155 155
156 156 def expandpath(self, loc):
157 157 """Return repository location relative to cwd or from [paths]"""
# URLs and existing local paths pass through untouched; anything else is
# treated as a [paths] alias, falling back to the literal string itself.
158 158 if loc.find("://") != -1 or os.path.exists(loc):
159 159 return loc
160 160
161 161 return self.config("paths", loc, loc)
162 162
# Unconditional output to stdout; each argument is str()-ified.
163 163 def write(self, *args):
164 164 for a in args:
165 165 sys.stdout.write(str(a))
166 166
# Error output: flush stdout first so interleaved stdout/stderr text keeps
# its ordering, then write each argument to stderr.
167 167 def write_err(self, *args):
168 168 if not sys.stdout.closed: sys.stdout.flush()
169 169 for a in args:
170 170 sys.stderr.write(str(a))
171 171
# Flush both streams; the finally clause guarantees stderr is flushed even
# if flushing stdout raises.
172 172 def flush(self):
173 173 try:
174 174 sys.stdout.flush()
175 175 finally:
176 176 sys.stderr.flush()
177 177
# One line from stdin with the trailing newline stripped.
178 178 def readline(self):
179 179 return sys.stdin.readline()[:-1]
# Prompt until the reply matches regex `pat`; in non-interactive mode the
# default answer is returned immediately without prompting.
180 180 def prompt(self, msg, pat, default="y"):
181 181 if not self.interactive: return default
182 182 while 1:
183 183 self.write(msg, " ")
184 184 r = self.readline()
185 185 if re.match(pat, r):
186 186 return r
187 187 else:
188 188 self.write(_("unrecognized response\n"))
# Verbosity-gated output helpers: status unless quiet, warn always (to
# stderr), note only when verbose, debug only when debugging.
189 189 def status(self, *msg):
190 190 if not self.quiet: self.write(*msg)
191 191 def warn(self, *msg):
192 192 self.write_err(*msg)
193 193 def note(self, *msg):
194 194 if self.verbose: self.write(*msg)
195 195 def debug(self, *msg):
196 196 if self.debugflag: self.write(*msg)
# edit(): write `text` to a temp file, run the user's editor on it, strip
# "HG:" helper lines, and return the edited text.  This changeset adds the
# `user` parameter so the $HGUSER seen by hgeditor is the identity the
# caller is actually committing as (e.g. from "commit -u"), instead of
# self.username() -- and passes it via util.system's `environ` argument
# rather than mutating os.environ (old lines 208/210 below are the removed
# versions; they carry only an old-side line number in this diff view).
197 def edit(self, text):
197 def edit(self, text, user):
198 198 import tempfile
199 199 (fd, name) = tempfile.mkstemp("hg")
200 200 f = os.fdopen(fd, "w")
201 201 f.write(text)
202 202 f.close()
# Editor choice, first match wins: $HGEDITOR, ui.editor config, $EDITOR, vi.
204 204 editor = (os.environ.get("HGEDITOR") or
205 205 self.config("ui", "editor") or
206 206 os.environ.get("EDITOR", "vi"))
207 207
208 os.environ["HGUSER"] = self.username()
209 208 util.system("%s \"%s\"" % (editor, name),
210 environ={'HGUSER': self.username()},
209 environ={'HGUSER': user},
211 210 onerr=util.Abort, errprefix=_("edit failed"))
212 211
213 212 t = open(name).read()
214 213 t = re.sub("(?m)^HG:.*\n", "", t)
215 214
216 215 os.unlink(name)
217 216
218 217 return t
General Comments 0
You need to be logged in to leave comments. Login now