##// END OF EJS Templates
Fix parsing of tags. Make parse errors useful. Add new tag tests.
Vadim Gelfer
r2320:dbdce3b9 default
parent child Browse files
Show More
@@ -1,2102 +1,2109
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import os, util
9 9 import filelog, manifest, changelog, dirstate, repo
10 10 from node import *
11 11 from i18n import gettext as _
12 12 from demandload import *
13 13 demandload(globals(), "appendfile changegroup")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "revlog traceback")
16 16
17 17 class localrepository(object):
18 18 def __del__(self):
19 19 self.transhandle = None
20 20 def __init__(self, parentui, path=None, create=0):
21 21 if not path:
22 22 p = os.getcwd()
23 23 while not os.path.isdir(os.path.join(p, ".hg")):
24 24 oldp = p
25 25 p = os.path.dirname(p)
26 26 if p == oldp:
27 27 raise repo.RepoError(_("no repo found"))
28 28 path = p
29 29 self.path = os.path.join(path, ".hg")
30 30
31 31 if not create and not os.path.isdir(self.path):
32 32 raise repo.RepoError(_("repository %s not found") % path)
33 33
34 34 self.root = os.path.abspath(path)
35 35 self.origroot = path
36 36 self.ui = ui.ui(parentui=parentui)
37 37 self.opener = util.opener(self.path)
38 38 self.wopener = util.opener(self.root)
39 39
40 40 try:
41 41 self.ui.readconfig(self.join("hgrc"), self.root)
42 42 except IOError:
43 43 pass
44 44
45 45 v = self.ui.revlogopts
46 46 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
47 47 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
48 48 fl = v.get('flags', None)
49 49 flags = 0
50 50 if fl != None:
51 51 for x in fl.split():
52 52 flags |= revlog.flagstr(x)
53 53 elif self.revlogv1:
54 54 flags = revlog.REVLOG_DEFAULT_FLAGS
55 55
56 56 v = self.revlogversion | flags
57 57 self.manifest = manifest.manifest(self.opener, v)
58 58 self.changelog = changelog.changelog(self.opener, v)
59 59
60 60 # the changelog might not have the inline index flag
61 61 # on. If the format of the changelog is the same as found in
62 62 # .hgrc, apply any flags found in the .hgrc as well.
63 63 # Otherwise, just version from the changelog
64 64 v = self.changelog.version
65 65 if v == self.revlogversion:
66 66 v |= flags
67 67 self.revlogversion = v
68 68
69 69 self.tagscache = None
70 70 self.nodetagscache = None
71 71 self.encodepats = None
72 72 self.decodepats = None
73 73 self.transhandle = None
74 74
75 75 if create:
76 76 os.mkdir(self.path)
77 77 os.mkdir(self.join("data"))
78 78
79 79 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
80 80
81 81 def hook(self, name, throw=False, **args):
82 82 def callhook(hname, funcname):
83 83 '''call python hook. hook is callable object, looked up as
84 84 name in python module. if callable returns "true", hook
85 85 fails, else passes. if hook raises exception, treated as
86 86 hook failure. exception propagates if throw is "true".
87 87
88 88 reason for "true" meaning "hook failed" is so that
89 89 unmodified commands (e.g. mercurial.commands.update) can
90 90 be run as hooks without wrappers to convert return values.'''
91 91
92 92 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
93 93 d = funcname.rfind('.')
94 94 if d == -1:
95 95 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
96 96 % (hname, funcname))
97 97 modname = funcname[:d]
98 98 try:
99 99 obj = __import__(modname)
100 100 except ImportError:
101 101 raise util.Abort(_('%s hook is invalid '
102 102 '(import of "%s" failed)') %
103 103 (hname, modname))
104 104 try:
105 105 for p in funcname.split('.')[1:]:
106 106 obj = getattr(obj, p)
107 107 except AttributeError, err:
108 108 raise util.Abort(_('%s hook is invalid '
109 109 '("%s" is not defined)') %
110 110 (hname, funcname))
111 111 if not callable(obj):
112 112 raise util.Abort(_('%s hook is invalid '
113 113 '("%s" is not callable)') %
114 114 (hname, funcname))
115 115 try:
116 116 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
117 117 except (KeyboardInterrupt, util.SignalInterrupt):
118 118 raise
119 119 except Exception, exc:
120 120 if isinstance(exc, util.Abort):
121 121 self.ui.warn(_('error: %s hook failed: %s\n') %
122 122 (hname, exc.args[0] % exc.args[1:]))
123 123 else:
124 124 self.ui.warn(_('error: %s hook raised an exception: '
125 125 '%s\n') % (hname, exc))
126 126 if throw:
127 127 raise
128 128 if self.ui.traceback:
129 129 traceback.print_exc()
130 130 return True
131 131 if r:
132 132 if throw:
133 133 raise util.Abort(_('%s hook failed') % hname)
134 134 self.ui.warn(_('warning: %s hook failed\n') % hname)
135 135 return r
136 136
137 137 def runhook(name, cmd):
138 138 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
139 139 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
140 140 r = util.system(cmd, environ=env, cwd=self.root)
141 141 if r:
142 142 desc, r = util.explain_exit(r)
143 143 if throw:
144 144 raise util.Abort(_('%s hook %s') % (name, desc))
145 145 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
146 146 return r
147 147
148 148 r = False
149 149 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
150 150 if hname.split(".", 1)[0] == name and cmd]
151 151 hooks.sort()
152 152 for hname, cmd in hooks:
153 153 if cmd.startswith('python:'):
154 154 r = callhook(hname, cmd[7:].strip()) or r
155 155 else:
156 156 r = runhook(hname, cmd) or r
157 157 return r
158 158
159 159 def tags(self):
160 160 '''return a mapping of tag to node'''
161 161 if not self.tagscache:
162 162 self.tagscache = {}
163 163
164 164 def parsetag(line, context):
165 165 if not line:
166 166 return
167 167 s = l.split(" ", 1)
168 168 if len(s) != 2:
169 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
169 self.ui.warn(_("%s: cannot parse entry\n") % context)
170 170 return
171 171 node, key = s
172 key = key.strip()
172 173 try:
173 174 bin_n = bin(node)
174 175 except TypeError:
175 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
176 self.ui.warn(_("%s: node '%s' is not well formed\n") %
177 (context, node))
176 178 return
177 179 if bin_n not in self.changelog.nodemap:
178 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
180 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
181 (context, key))
179 182 return
180 self.tagscache[key.strip()] = bin_n
183 self.tagscache[key] = bin_n
181 184
182 # read each head of the tags file, ending with the tip
185 # read the tags file from each head, ending with the tip,
183 186 # and add each tag found to the map, with "newer" ones
184 187 # taking precedence
188 heads = self.heads()
189 heads.reverse()
185 190 fl = self.file(".hgtags")
186 h = fl.heads()
187 h.reverse()
188 for r in h:
191 for node in heads:
192 change = self.changelog.read(node)
193 rev = self.changelog.rev(node)
194 fn, ff = self.manifest.find(change[0], '.hgtags')
195 if fn is None: continue
189 196 count = 0
190 for l in fl.read(r).splitlines():
197 for l in fl.read(fn).splitlines():
191 198 count += 1
192 parsetag(l, ".hgtags:%d" % count)
193
199 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
200 (rev, short(node), count))
194 201 try:
195 202 f = self.opener("localtags")
196 203 count = 0
197 204 for l in f:
198 205 count += 1
199 parsetag(l, "localtags:%d" % count)
206 parsetag(l, _("localtags, line %d") % count)
200 207 except IOError:
201 208 pass
202 209
203 210 self.tagscache['tip'] = self.changelog.tip()
204 211
205 212 return self.tagscache
206 213
207 214 def tagslist(self):
208 215 '''return a list of tags ordered by revision'''
209 216 l = []
210 217 for t, n in self.tags().items():
211 218 try:
212 219 r = self.changelog.rev(n)
213 220 except:
214 221 r = -2 # sort to the beginning of the list if unknown
215 222 l.append((r, t, n))
216 223 l.sort()
217 224 return [(t, n) for r, t, n in l]
218 225
219 226 def nodetags(self, node):
220 227 '''return the tags associated with a node'''
221 228 if not self.nodetagscache:
222 229 self.nodetagscache = {}
223 230 for t, n in self.tags().items():
224 231 self.nodetagscache.setdefault(n, []).append(t)
225 232 return self.nodetagscache.get(node, [])
226 233
227 234 def lookup(self, key):
228 235 try:
229 236 return self.tags()[key]
230 237 except KeyError:
231 238 try:
232 239 return self.changelog.lookup(key)
233 240 except:
234 241 raise repo.RepoError(_("unknown revision '%s'") % key)
235 242
236 243 def dev(self):
237 244 return os.stat(self.path).st_dev
238 245
239 246 def local(self):
240 247 return True
241 248
242 249 def join(self, f):
243 250 return os.path.join(self.path, f)
244 251
245 252 def wjoin(self, f):
246 253 return os.path.join(self.root, f)
247 254
248 255 def file(self, f):
249 256 if f[0] == '/':
250 257 f = f[1:]
251 258 return filelog.filelog(self.opener, f, self.revlogversion)
252 259
253 260 def getcwd(self):
254 261 return self.dirstate.getcwd()
255 262
256 263 def wfile(self, f, mode='r'):
257 264 return self.wopener(f, mode)
258 265
259 266 def wread(self, filename):
260 267 if self.encodepats == None:
261 268 l = []
262 269 for pat, cmd in self.ui.configitems("encode"):
263 270 mf = util.matcher(self.root, "", [pat], [], [])[1]
264 271 l.append((mf, cmd))
265 272 self.encodepats = l
266 273
267 274 data = self.wopener(filename, 'r').read()
268 275
269 276 for mf, cmd in self.encodepats:
270 277 if mf(filename):
271 278 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
272 279 data = util.filter(data, cmd)
273 280 break
274 281
275 282 return data
276 283
277 284 def wwrite(self, filename, data, fd=None):
278 285 if self.decodepats == None:
279 286 l = []
280 287 for pat, cmd in self.ui.configitems("decode"):
281 288 mf = util.matcher(self.root, "", [pat], [], [])[1]
282 289 l.append((mf, cmd))
283 290 self.decodepats = l
284 291
285 292 for mf, cmd in self.decodepats:
286 293 if mf(filename):
287 294 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
288 295 data = util.filter(data, cmd)
289 296 break
290 297
291 298 if fd:
292 299 return fd.write(data)
293 300 return self.wopener(filename, 'w').write(data)
294 301
295 302 def transaction(self):
296 303 tr = self.transhandle
297 304 if tr != None and tr.running():
298 305 return tr.nest()
299 306
300 307 # save dirstate for undo
301 308 try:
302 309 ds = self.opener("dirstate").read()
303 310 except IOError:
304 311 ds = ""
305 312 self.opener("journal.dirstate", "w").write(ds)
306 313
307 314 tr = transaction.transaction(self.ui.warn, self.opener,
308 315 self.join("journal"),
309 316 aftertrans(self.path))
310 317 self.transhandle = tr
311 318 return tr
312 319
313 320 def recover(self):
314 321 l = self.lock()
315 322 if os.path.exists(self.join("journal")):
316 323 self.ui.status(_("rolling back interrupted transaction\n"))
317 324 transaction.rollback(self.opener, self.join("journal"))
318 325 self.reload()
319 326 return True
320 327 else:
321 328 self.ui.warn(_("no interrupted transaction available\n"))
322 329 return False
323 330
324 331 def undo(self, wlock=None):
325 332 if not wlock:
326 333 wlock = self.wlock()
327 334 l = self.lock()
328 335 if os.path.exists(self.join("undo")):
329 336 self.ui.status(_("rolling back last transaction\n"))
330 337 transaction.rollback(self.opener, self.join("undo"))
331 338 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
332 339 self.reload()
333 340 self.wreload()
334 341 else:
335 342 self.ui.warn(_("no undo information available\n"))
336 343
337 344 def wreload(self):
338 345 self.dirstate.read()
339 346
340 347 def reload(self):
341 348 self.changelog.load()
342 349 self.manifest.load()
343 350 self.tagscache = None
344 351 self.nodetagscache = None
345 352
346 353 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
347 354 desc=None):
348 355 try:
349 356 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
350 357 except lock.LockHeld, inst:
351 358 if not wait:
352 359 raise
353 360 self.ui.warn(_("waiting for lock on %s held by %s\n") %
354 361 (desc, inst.args[0]))
355 362 # default to 600 seconds timeout
356 363 l = lock.lock(self.join(lockname),
357 364 int(self.ui.config("ui", "timeout") or 600),
358 365 releasefn, desc=desc)
359 366 if acquirefn:
360 367 acquirefn()
361 368 return l
362 369
363 370 def lock(self, wait=1):
364 371 return self.do_lock("lock", wait, acquirefn=self.reload,
365 372 desc=_('repository %s') % self.origroot)
366 373
367 374 def wlock(self, wait=1):
368 375 return self.do_lock("wlock", wait, self.dirstate.write,
369 376 self.wreload,
370 377 desc=_('working directory of %s') % self.origroot)
371 378
372 379 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
373 380 "determine whether a new filenode is needed"
374 381 fp1 = manifest1.get(filename, nullid)
375 382 fp2 = manifest2.get(filename, nullid)
376 383
377 384 if fp2 != nullid:
378 385 # is one parent an ancestor of the other?
379 386 fpa = filelog.ancestor(fp1, fp2)
380 387 if fpa == fp1:
381 388 fp1, fp2 = fp2, nullid
382 389 elif fpa == fp2:
383 390 fp2 = nullid
384 391
385 392 # is the file unmodified from the parent? report existing entry
386 393 if fp2 == nullid and text == filelog.read(fp1):
387 394 return (fp1, None, None)
388 395
389 396 return (None, fp1, fp2)
390 397
391 398 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
392 399 orig_parent = self.dirstate.parents()[0] or nullid
393 400 p1 = p1 or self.dirstate.parents()[0] or nullid
394 401 p2 = p2 or self.dirstate.parents()[1] or nullid
395 402 c1 = self.changelog.read(p1)
396 403 c2 = self.changelog.read(p2)
397 404 m1 = self.manifest.read(c1[0])
398 405 mf1 = self.manifest.readflags(c1[0])
399 406 m2 = self.manifest.read(c2[0])
400 407 changed = []
401 408
402 409 if orig_parent == p1:
403 410 update_dirstate = 1
404 411 else:
405 412 update_dirstate = 0
406 413
407 414 if not wlock:
408 415 wlock = self.wlock()
409 416 l = self.lock()
410 417 tr = self.transaction()
411 418 mm = m1.copy()
412 419 mfm = mf1.copy()
413 420 linkrev = self.changelog.count()
414 421 for f in files:
415 422 try:
416 423 t = self.wread(f)
417 424 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
418 425 r = self.file(f)
419 426 mfm[f] = tm
420 427
421 428 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
422 429 if entry:
423 430 mm[f] = entry
424 431 continue
425 432
426 433 mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
427 434 changed.append(f)
428 435 if update_dirstate:
429 436 self.dirstate.update([f], "n")
430 437 except IOError:
431 438 try:
432 439 del mm[f]
433 440 del mfm[f]
434 441 if update_dirstate:
435 442 self.dirstate.forget([f])
436 443 except:
437 444 # deleted from p2?
438 445 pass
439 446
440 447 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
441 448 user = user or self.ui.username()
442 449 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
443 450 tr.close()
444 451 if update_dirstate:
445 452 self.dirstate.setparents(n, nullid)
446 453
447 454 def commit(self, files=None, text="", user=None, date=None,
448 455 match=util.always, force=False, lock=None, wlock=None,
449 456 force_editor=False):
450 457 commit = []
451 458 remove = []
452 459 changed = []
453 460
454 461 if files:
455 462 for f in files:
456 463 s = self.dirstate.state(f)
457 464 if s in 'nmai':
458 465 commit.append(f)
459 466 elif s == 'r':
460 467 remove.append(f)
461 468 else:
462 469 self.ui.warn(_("%s not tracked!\n") % f)
463 470 else:
464 471 modified, added, removed, deleted, unknown = self.changes(match=match)
465 472 commit = modified + added
466 473 remove = removed
467 474
468 475 p1, p2 = self.dirstate.parents()
469 476 c1 = self.changelog.read(p1)
470 477 c2 = self.changelog.read(p2)
471 478 m1 = self.manifest.read(c1[0])
472 479 mf1 = self.manifest.readflags(c1[0])
473 480 m2 = self.manifest.read(c2[0])
474 481
475 482 if not commit and not remove and not force and p2 == nullid:
476 483 self.ui.status(_("nothing changed\n"))
477 484 return None
478 485
479 486 xp1 = hex(p1)
480 487 if p2 == nullid: xp2 = ''
481 488 else: xp2 = hex(p2)
482 489
483 490 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
484 491
485 492 if not wlock:
486 493 wlock = self.wlock()
487 494 if not lock:
488 495 lock = self.lock()
489 496 tr = self.transaction()
490 497
491 498 # check in files
492 499 new = {}
493 500 linkrev = self.changelog.count()
494 501 commit.sort()
495 502 for f in commit:
496 503 self.ui.note(f + "\n")
497 504 try:
498 505 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
499 506 t = self.wread(f)
500 507 except IOError:
501 508 self.ui.warn(_("trouble committing %s!\n") % f)
502 509 raise
503 510
504 511 r = self.file(f)
505 512
506 513 meta = {}
507 514 cp = self.dirstate.copied(f)
508 515 if cp:
509 516 meta["copy"] = cp
510 517 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
511 518 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
512 519 fp1, fp2 = nullid, nullid
513 520 else:
514 521 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
515 522 if entry:
516 523 new[f] = entry
517 524 continue
518 525
519 526 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
520 527 # remember what we've added so that we can later calculate
521 528 # the files to pull from a set of changesets
522 529 changed.append(f)
523 530
524 531 # update manifest
525 532 m1 = m1.copy()
526 533 m1.update(new)
527 534 for f in remove:
528 535 if f in m1:
529 536 del m1[f]
530 537 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
531 538 (new, remove))
532 539
533 540 # add changeset
534 541 new = new.keys()
535 542 new.sort()
536 543
537 544 user = user or self.ui.username()
538 545 if not text or force_editor:
539 546 edittext = []
540 547 if text:
541 548 edittext.append(text)
542 549 edittext.append("")
543 550 if p2 != nullid:
544 551 edittext.append("HG: branch merge")
545 552 edittext.extend(["HG: changed %s" % f for f in changed])
546 553 edittext.extend(["HG: removed %s" % f for f in remove])
547 554 if not changed and not remove:
548 555 edittext.append("HG: no files changed")
549 556 edittext.append("")
550 557 # run editor in the repository root
551 558 olddir = os.getcwd()
552 559 os.chdir(self.root)
553 560 text = self.ui.edit("\n".join(edittext), user)
554 561 os.chdir(olddir)
555 562
556 563 lines = [line.rstrip() for line in text.rstrip().splitlines()]
557 564 while lines and not lines[0]:
558 565 del lines[0]
559 566 if not lines:
560 567 return None
561 568 text = '\n'.join(lines)
562 569 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
563 570 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
564 571 parent2=xp2)
565 572 tr.close()
566 573
567 574 self.dirstate.setparents(n)
568 575 self.dirstate.update(new, "n")
569 576 self.dirstate.forget(remove)
570 577
571 578 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
572 579 return n
573 580
574 581 def walk(self, node=None, files=[], match=util.always, badmatch=None):
575 582 if node:
576 583 fdict = dict.fromkeys(files)
577 584 for fn in self.manifest.read(self.changelog.read(node)[0]):
578 585 fdict.pop(fn, None)
579 586 if match(fn):
580 587 yield 'm', fn
581 588 for fn in fdict:
582 589 if badmatch and badmatch(fn):
583 590 if match(fn):
584 591 yield 'b', fn
585 592 else:
586 593 self.ui.warn(_('%s: No such file in rev %s\n') % (
587 594 util.pathto(self.getcwd(), fn), short(node)))
588 595 else:
589 596 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
590 597 yield src, fn
591 598
592 599 def changes(self, node1=None, node2=None, files=[], match=util.always,
593 600 wlock=None, show_ignored=None):
594 601 """return changes between two nodes or node and working directory
595 602
596 603 If node1 is None, use the first dirstate parent instead.
597 604 If node2 is None, compare node1 with working directory.
598 605 """
599 606
600 607 def fcmp(fn, mf):
601 608 t1 = self.wread(fn)
602 609 t2 = self.file(fn).read(mf.get(fn, nullid))
603 610 return cmp(t1, t2)
604 611
605 612 def mfmatches(node):
606 613 change = self.changelog.read(node)
607 614 mf = dict(self.manifest.read(change[0]))
608 615 for fn in mf.keys():
609 616 if not match(fn):
610 617 del mf[fn]
611 618 return mf
612 619
613 620 if node1:
614 621 # read the manifest from node1 before the manifest from node2,
615 622 # so that we'll hit the manifest cache if we're going through
616 623 # all the revisions in parent->child order.
617 624 mf1 = mfmatches(node1)
618 625
619 626 # are we comparing the working directory?
620 627 if not node2:
621 628 if not wlock:
622 629 try:
623 630 wlock = self.wlock(wait=0)
624 631 except lock.LockException:
625 632 wlock = None
626 633 lookup, modified, added, removed, deleted, unknown, ignored = (
627 634 self.dirstate.changes(files, match, show_ignored))
628 635
629 636 # are we comparing working dir against its parent?
630 637 if not node1:
631 638 if lookup:
632 639 # do a full compare of any files that might have changed
633 640 mf2 = mfmatches(self.dirstate.parents()[0])
634 641 for f in lookup:
635 642 if fcmp(f, mf2):
636 643 modified.append(f)
637 644 elif wlock is not None:
638 645 self.dirstate.update([f], "n")
639 646 else:
640 647 # we are comparing working dir against non-parent
641 648 # generate a pseudo-manifest for the working dir
642 649 mf2 = mfmatches(self.dirstate.parents()[0])
643 650 for f in lookup + modified + added:
644 651 mf2[f] = ""
645 652 for f in removed:
646 653 if f in mf2:
647 654 del mf2[f]
648 655 else:
649 656 # we are comparing two revisions
650 657 deleted, unknown, ignored = [], [], []
651 658 mf2 = mfmatches(node2)
652 659
653 660 if node1:
654 661 # flush lists from dirstate before comparing manifests
655 662 modified, added = [], []
656 663
657 664 for fn in mf2:
658 665 if mf1.has_key(fn):
659 666 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
660 667 modified.append(fn)
661 668 del mf1[fn]
662 669 else:
663 670 added.append(fn)
664 671
665 672 removed = mf1.keys()
666 673
667 674 # sort and return results:
668 675 for l in modified, added, removed, deleted, unknown, ignored:
669 676 l.sort()
670 677 if show_ignored is None:
671 678 return (modified, added, removed, deleted, unknown)
672 679 else:
673 680 return (modified, added, removed, deleted, unknown, ignored)
674 681
675 682 def add(self, list, wlock=None):
676 683 if not wlock:
677 684 wlock = self.wlock()
678 685 for f in list:
679 686 p = self.wjoin(f)
680 687 if not os.path.exists(p):
681 688 self.ui.warn(_("%s does not exist!\n") % f)
682 689 elif not os.path.isfile(p):
683 690 self.ui.warn(_("%s not added: only files supported currently\n")
684 691 % f)
685 692 elif self.dirstate.state(f) in 'an':
686 693 self.ui.warn(_("%s already tracked!\n") % f)
687 694 else:
688 695 self.dirstate.update([f], "a")
689 696
690 697 def forget(self, list, wlock=None):
691 698 if not wlock:
692 699 wlock = self.wlock()
693 700 for f in list:
694 701 if self.dirstate.state(f) not in 'ai':
695 702 self.ui.warn(_("%s not added!\n") % f)
696 703 else:
697 704 self.dirstate.forget([f])
698 705
699 706 def remove(self, list, unlink=False, wlock=None):
700 707 if unlink:
701 708 for f in list:
702 709 try:
703 710 util.unlink(self.wjoin(f))
704 711 except OSError, inst:
705 712 if inst.errno != errno.ENOENT:
706 713 raise
707 714 if not wlock:
708 715 wlock = self.wlock()
709 716 for f in list:
710 717 p = self.wjoin(f)
711 718 if os.path.exists(p):
712 719 self.ui.warn(_("%s still exists!\n") % f)
713 720 elif self.dirstate.state(f) == 'a':
714 721 self.dirstate.forget([f])
715 722 elif f not in self.dirstate:
716 723 self.ui.warn(_("%s not tracked!\n") % f)
717 724 else:
718 725 self.dirstate.update([f], "r")
719 726
720 727 def undelete(self, list, wlock=None):
721 728 p = self.dirstate.parents()[0]
722 729 mn = self.changelog.read(p)[0]
723 730 mf = self.manifest.readflags(mn)
724 731 m = self.manifest.read(mn)
725 732 if not wlock:
726 733 wlock = self.wlock()
727 734 for f in list:
728 735 if self.dirstate.state(f) not in "r":
729 736 self.ui.warn("%s not removed!\n" % f)
730 737 else:
731 738 t = self.file(f).read(m[f])
732 739 self.wwrite(f, t)
733 740 util.set_exec(self.wjoin(f), mf[f])
734 741 self.dirstate.update([f], "n")
735 742
736 743 def copy(self, source, dest, wlock=None):
737 744 p = self.wjoin(dest)
738 745 if not os.path.exists(p):
739 746 self.ui.warn(_("%s does not exist!\n") % dest)
740 747 elif not os.path.isfile(p):
741 748 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
742 749 else:
743 750 if not wlock:
744 751 wlock = self.wlock()
745 752 if self.dirstate.state(dest) == '?':
746 753 self.dirstate.update([dest], "a")
747 754 self.dirstate.copy(source, dest)
748 755
749 756 def heads(self, start=None):
750 757 heads = self.changelog.heads(start)
751 758 # sort the output in rev descending order
752 759 heads = [(-self.changelog.rev(h), h) for h in heads]
753 760 heads.sort()
754 761 return [n for (r, n) in heads]
755 762
756 763 # branchlookup returns a dict giving a list of branches for
757 764 # each head. A branch is defined as the tag of a node or
758 765 # the branch of the node's parents. If a node has multiple
759 766 # branch tags, tags are eliminated if they are visible from other
760 767 # branch tags.
761 768 #
762 769 # So, for this graph: a->b->c->d->e
763 770 # \ /
764 771 # aa -----/
765 772 # a has tag 2.6.12
766 773 # d has tag 2.6.13
767 774 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
768 775 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
769 776 # from the list.
770 777 #
771 778 # It is possible that more than one head will have the same branch tag.
772 779 # callers need to check the result for multiple heads under the same
773 780 # branch tag if that is a problem for them (ie checkout of a specific
774 781 # branch).
775 782 #
776 783 # passing in a specific branch will limit the depth of the search
777 784 # through the parents. It won't limit the branches returned in the
778 785 # result though.
779 786 def branchlookup(self, heads=None, branch=None):
780 787 if not heads:
781 788 heads = self.heads()
782 789 headt = [ h for h in heads ]
783 790 chlog = self.changelog
784 791 branches = {}
785 792 merges = []
786 793 seenmerge = {}
787 794
788 795 # traverse the tree once for each head, recording in the branches
789 796 # dict which tags are visible from this head. The branches
790 797 # dict also records which tags are visible from each tag
791 798 # while we traverse.
792 799 while headt or merges:
793 800 if merges:
794 801 n, found = merges.pop()
795 802 visit = [n]
796 803 else:
797 804 h = headt.pop()
798 805 visit = [h]
799 806 found = [h]
800 807 seen = {}
801 808 while visit:
802 809 n = visit.pop()
803 810 if n in seen:
804 811 continue
805 812 pp = chlog.parents(n)
806 813 tags = self.nodetags(n)
807 814 if tags:
808 815 for x in tags:
809 816 if x == 'tip':
810 817 continue
811 818 for f in found:
812 819 branches.setdefault(f, {})[n] = 1
813 820 branches.setdefault(n, {})[n] = 1
814 821 break
815 822 if n not in found:
816 823 found.append(n)
817 824 if branch in tags:
818 825 continue
819 826 seen[n] = 1
820 827 if pp[1] != nullid and n not in seenmerge:
821 828 merges.append((pp[1], [x for x in found]))
822 829 seenmerge[n] = 1
823 830 if pp[0] != nullid:
824 831 visit.append(pp[0])
825 832 # traverse the branches dict, eliminating branch tags from each
826 833 # head that are visible from another branch tag for that head.
827 834 out = {}
828 835 viscache = {}
829 836 for h in heads:
830 837 def visible(node):
831 838 if node in viscache:
832 839 return viscache[node]
833 840 ret = {}
834 841 visit = [node]
835 842 while visit:
836 843 x = visit.pop()
837 844 if x in viscache:
838 845 ret.update(viscache[x])
839 846 elif x not in ret:
840 847 ret[x] = 1
841 848 if x in branches:
842 849 visit[len(visit):] = branches[x].keys()
843 850 viscache[node] = ret
844 851 return ret
845 852 if h not in branches:
846 853 continue
847 854 # O(n^2), but somewhat limited. This only searches the
848 855 # tags visible from a specific head, not all the tags in the
849 856 # whole repo.
850 857 for b in branches[h]:
851 858 vis = False
852 859 for bb in branches[h].keys():
853 860 if b != bb:
854 861 if b in visible(bb):
855 862 vis = True
856 863 break
857 864 if not vis:
858 865 l = out.setdefault(h, [])
859 866 l[len(l):] = self.nodetags(b)
860 867 return out
861 868
862 869 def branches(self, nodes):
863 870 if not nodes:
864 871 nodes = [self.changelog.tip()]
865 872 b = []
866 873 for n in nodes:
867 874 t = n
868 875 while n:
869 876 p = self.changelog.parents(n)
870 877 if p[1] != nullid or p[0] == nullid:
871 878 b.append((t, n, p[0], p[1]))
872 879 break
873 880 n = p[0]
874 881 return b
875 882
876 883 def between(self, pairs):
877 884 r = []
878 885
879 886 for top, bottom in pairs:
880 887 n, l, i = top, [], 0
881 888 f = 1
882 889
883 890 while n != bottom:
884 891 p = self.changelog.parents(n)[0]
885 892 if i == f:
886 893 l.append(n)
887 894 f = f * 2
888 895 n = p
889 896 i += 1
890 897
891 898 r.append(l)
892 899
893 900 return r
894 901
895 902 def findincoming(self, remote, base=None, heads=None, force=False):
896 903 m = self.changelog.nodemap
897 904 search = []
898 905 fetch = {}
899 906 seen = {}
900 907 seenbranch = {}
901 908 if base == None:
902 909 base = {}
903 910
904 911 if not heads:
905 912 heads = remote.heads()
906 913
907 914 if self.changelog.tip() == nullid:
908 915 if heads != [nullid]:
909 916 return [nullid]
910 917 return []
911 918
912 919 # assume we're closer to the tip than the root
913 920 # and start by examining the heads
914 921 self.ui.status(_("searching for changes\n"))
915 922
916 923 unknown = []
917 924 for h in heads:
918 925 if h not in m:
919 926 unknown.append(h)
920 927 else:
921 928 base[h] = 1
922 929
923 930 if not unknown:
924 931 return []
925 932
926 933 rep = {}
927 934 reqcnt = 0
928 935
929 936 # search through remote branches
930 937 # a 'branch' here is a linear segment of history, with four parts:
931 938 # head, root, first parent, second parent
932 939 # (a branch always has two parents (or none) by definition)
933 940 unknown = remote.branches(unknown)
934 941 while unknown:
935 942 r = []
936 943 while unknown:
937 944 n = unknown.pop(0)
938 945 if n[0] in seen:
939 946 continue
940 947
941 948 self.ui.debug(_("examining %s:%s\n")
942 949 % (short(n[0]), short(n[1])))
943 950 if n[0] == nullid:
944 951 break
945 952 if n in seenbranch:
946 953 self.ui.debug(_("branch already found\n"))
947 954 continue
948 955 if n[1] and n[1] in m: # do we know the base?
949 956 self.ui.debug(_("found incomplete branch %s:%s\n")
950 957 % (short(n[0]), short(n[1])))
951 958 search.append(n) # schedule branch range for scanning
952 959 seenbranch[n] = 1
953 960 else:
954 961 if n[1] not in seen and n[1] not in fetch:
955 962 if n[2] in m and n[3] in m:
956 963 self.ui.debug(_("found new changeset %s\n") %
957 964 short(n[1]))
958 965 fetch[n[1]] = 1 # earliest unknown
959 966 base[n[2]] = 1 # latest known
960 967 continue
961 968
962 969 for a in n[2:4]:
963 970 if a not in rep:
964 971 r.append(a)
965 972 rep[a] = 1
966 973
967 974 seen[n[0]] = 1
968 975
969 976 if r:
970 977 reqcnt += 1
971 978 self.ui.debug(_("request %d: %s\n") %
972 979 (reqcnt, " ".join(map(short, r))))
973 980 for p in range(0, len(r), 10):
974 981 for b in remote.branches(r[p:p+10]):
975 982 self.ui.debug(_("received %s:%s\n") %
976 983 (short(b[0]), short(b[1])))
977 984 if b[0] in m:
978 985 self.ui.debug(_("found base node %s\n")
979 986 % short(b[0]))
980 987 base[b[0]] = 1
981 988 elif b[0] not in seen:
982 989 unknown.append(b)
983 990
984 991 # do binary search on the branches we found
985 992 while search:
986 993 n = search.pop(0)
987 994 reqcnt += 1
988 995 l = remote.between([(n[0], n[1])])[0]
989 996 l.append(n[1])
990 997 p = n[0]
991 998 f = 1
992 999 for i in l:
993 1000 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
994 1001 if i in m:
995 1002 if f <= 2:
996 1003 self.ui.debug(_("found new branch changeset %s\n") %
997 1004 short(p))
998 1005 fetch[p] = 1
999 1006 base[i] = 1
1000 1007 else:
1001 1008 self.ui.debug(_("narrowed branch search to %s:%s\n")
1002 1009 % (short(p), short(i)))
1003 1010 search.append((p, i))
1004 1011 break
1005 1012 p, f = i, f * 2
1006 1013
1007 1014 # sanity check our fetch list
1008 1015 for f in fetch.keys():
1009 1016 if f in m:
1010 1017 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1011 1018
1012 1019 if base.keys() == [nullid]:
1013 1020 if force:
1014 1021 self.ui.warn(_("warning: repository is unrelated\n"))
1015 1022 else:
1016 1023 raise util.Abort(_("repository is unrelated"))
1017 1024
1018 1025 self.ui.note(_("found new changesets starting at ") +
1019 1026 " ".join([short(f) for f in fetch]) + "\n")
1020 1027
1021 1028 self.ui.debug(_("%d total queries\n") % reqcnt)
1022 1029
1023 1030 return fetch.keys()
1024 1031
1025 1032 def findoutgoing(self, remote, base=None, heads=None, force=False):
1026 1033 """Return list of nodes that are roots of subsets not in remote
1027 1034
1028 1035 If base dict is specified, assume that these nodes and their parents
1029 1036 exist on the remote side.
1030 1037 If a list of heads is specified, return only nodes which are heads
1031 1038 or ancestors of these heads, and return a second element which
1032 1039 contains all remote heads which get new children.
1033 1040 """
1034 1041 if base == None:
1035 1042 base = {}
1036 1043 self.findincoming(remote, base, heads, force=force)
1037 1044
1038 1045 self.ui.debug(_("common changesets up to ")
1039 1046 + " ".join(map(short, base.keys())) + "\n")
1040 1047
1041 1048 remain = dict.fromkeys(self.changelog.nodemap)
1042 1049
1043 1050 # prune everything remote has from the tree
1044 1051 del remain[nullid]
1045 1052 remove = base.keys()
1046 1053 while remove:
1047 1054 n = remove.pop(0)
1048 1055 if n in remain:
1049 1056 del remain[n]
1050 1057 for p in self.changelog.parents(n):
1051 1058 remove.append(p)
1052 1059
1053 1060 # find every node whose parents have been pruned
1054 1061 subset = []
1055 1062 # find every remote head that will get new children
1056 1063 updated_heads = {}
1057 1064 for n in remain:
1058 1065 p1, p2 = self.changelog.parents(n)
1059 1066 if p1 not in remain and p2 not in remain:
1060 1067 subset.append(n)
1061 1068 if heads:
1062 1069 if p1 in heads:
1063 1070 updated_heads[p1] = True
1064 1071 if p2 in heads:
1065 1072 updated_heads[p2] = True
1066 1073
1067 1074 # this is the set of all roots we have to push
1068 1075 if heads:
1069 1076 return subset, updated_heads.keys()
1070 1077 else:
1071 1078 return subset
1072 1079
1073 1080 def pull(self, remote, heads=None, force=False):
1074 1081 l = self.lock()
1075 1082
1076 1083 fetch = self.findincoming(remote, force=force)
1077 1084 if fetch == [nullid]:
1078 1085 self.ui.status(_("requesting all changes\n"))
1079 1086
1080 1087 if not fetch:
1081 1088 self.ui.status(_("no changes found\n"))
1082 1089 return 0
1083 1090
1084 1091 if heads is None:
1085 1092 cg = remote.changegroup(fetch, 'pull')
1086 1093 else:
1087 1094 cg = remote.changegroupsubset(fetch, heads, 'pull')
1088 1095 return self.addchangegroup(cg, 'pull')
1089 1096
1090 1097 def push(self, remote, force=False, revs=None):
1091 1098 lock = remote.lock()
1092 1099
1093 1100 base = {}
1094 1101 remote_heads = remote.heads()
1095 1102 inc = self.findincoming(remote, base, remote_heads, force=force)
1096 1103 if not force and inc:
1097 1104 self.ui.warn(_("abort: unsynced remote changes!\n"))
1098 1105 self.ui.status(_("(did you forget to sync?"
1099 1106 " use push -f to force)\n"))
1100 1107 return 1
1101 1108
1102 1109 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1103 1110 if revs is not None:
1104 1111 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1105 1112 else:
1106 1113 bases, heads = update, self.changelog.heads()
1107 1114
1108 1115 if not bases:
1109 1116 self.ui.status(_("no changes found\n"))
1110 1117 return 1
1111 1118 elif not force:
1112 1119 # FIXME we don't properly detect creation of new heads
1113 1120 # in the push -r case, assume the user knows what he's doing
1114 1121 if not revs and len(remote_heads) < len(heads) \
1115 1122 and remote_heads != [nullid]:
1116 1123 self.ui.warn(_("abort: push creates new remote branches!\n"))
1117 1124 self.ui.status(_("(did you forget to merge?"
1118 1125 " use push -f to force)\n"))
1119 1126 return 1
1120 1127
1121 1128 if revs is None:
1122 1129 cg = self.changegroup(update, 'push')
1123 1130 else:
1124 1131 cg = self.changegroupsubset(update, revs, 'push')
1125 1132 return remote.addchangegroup(cg, 'push')
1126 1133
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        bases: list of changenodes assumed to be known by the recipient
        heads: list of changenodes the generated group will reach up to
        source: tag reported to the 'preoutgoing'/'outgoing' hooks

        Returns a util.chunkbuffer yielding the raw changegroup data."""

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1397 1404
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        basenodes: list of changenodes assumed to be known by the recipient
        source: tag reported to the 'preoutgoing'/'outgoing' hooks

        Returns a util.chunkbuffer yielding the raw changegroup data."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # everything between the bases and all heads goes out
        nodes = cl.nodesbetween(basenodes, None)[0]
        # ersatz set of the changelog revision numbers being sent
        revset = dict.fromkeys([cl.rev(n) for n in nodes])

        # a changenode 'owns' itself, so its lookup function is identity
        def identity(x):
            return x

        # yield, in revision order, the nodes of a revlog whose linkrev
        # falls inside the outgoing revision set
        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        # collector callback: record every file touched by each outgoing
        # changeset into changedfileset
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # map a node of the given revlog back to the changenode that
        # introduced it (via its linkrev)
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        # emit changelog chunks, then manifest chunks, then one named group
        # of chunks per changed file, then the closing chunk
        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1463 1470
    def addchangegroup(self, source, srctype):
        """add changegroup to repo.

        source: stream of changegroup chunk data
        srctype: tag reported to the changegroup-related hooks

        returns number of heads modified or added + 1."""

        # lookup callback for changelog.addgroup: report progress and
        # link each new changeset to its own (future) revision number
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        # lookup callback for manifest/filelog addgroup: map a changenode
        # to its changelog revision for the linkrev
        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog and manifest data to temp files so
        # concurrent readers will not see inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.opener, self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            co = cl.tip()
            chunkiter = changegroup.chunkiter(source)
            cn = cl.addgroup(chunkiter, csmap, tr, 1) # unique
            cnr, cor = map(cl.rev, (cn, co))
            if cn == nullid:
                # empty group: pretend the tip did not move
                cnr = cor
            changesets = cnr - cor

            mf = None
            try:
                mf = appendfile.appendmanifest(self.opener,
                                               self.manifest.version)

                # pull off the manifest group
                self.ui.status(_("adding manifests\n"))
                mm = mf.tip()
                chunkiter = changegroup.chunkiter(source)
                mo = mf.addgroup(chunkiter, revmap, tr)

                # process the files
                self.ui.status(_("adding file changes\n"))
                while 1:
                    # a zero-length chunk terminates the list of files
                    f = changegroup.getchunk(source)
                    if not f:
                        break
                    self.ui.debug(_("adding %s revisions\n") % f)
                    fl = self.file(f)
                    o = fl.count()
                    chunkiter = changegroup.chunkiter(source)
                    n = fl.addgroup(chunkiter, revmap, tr)
                    revisions += fl.count() - o
                    files += 1

                # write order here is important so concurrent readers will see
                # consistent view of repo
                mf.writedata()
            finally:
                if mf:
                    mf.cleanup()
                cl.writedata()
        finally:
            if cl:
                cl.cleanup()

        # make changelog and manifest see real files again
        self.changelog = changelog.changelog(self.opener, self.changelog.version)
        self.manifest = manifest.manifest(self.opener, self.manifest.version)
        self.changelog.checkinlinesize(tr)
        self.manifest.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads > oldheads:
            heads = _(" (+%d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # give hooks a chance to veto before the transaction commits
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype)

            for i in range(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype)

        return newheads - oldheads + 1
1568 1575
    def update(self, node, allow=False, force=False, choose=None,
               moddirstate=True, forcemerge=False, wlock=None, show_stats=True):
        """Update the working directory to the given changeset, merging or
        discarding local changes as the flags dictate.

        node: target changeset
        allow: perform a branch merge; aborts if the update is linear
        force: override safety checks and clobber local changes
        choose: optional predicate restricting which files are considered
        moddirstate: when false, leave the dirstate untouched
        forcemerge: proceed despite outstanding uncommitted changes
        wlock: reuse an already-held working dir lock (taken here if None)
        show_stats: print the updated/merged/removed/unresolved summary

        Returns a true value (1/True) on abort or failed merges, false on
        success.
        """
        pl = self.dirstate.parents()
        if not force and pl[1] != nullid:
            raise util.Abort(_("outstanding uncommitted merges"))

        err = False

        p1, p2 = pl[0], node
        pa = self.changelog.ancestor(p1, p2)
        m1n = self.changelog.read(p1)[0]
        m2n = self.changelog.read(p2)[0]
        man = self.manifest.ancestor(m1n, m2n)
        m1 = self.manifest.read(m1n)
        mf1 = self.manifest.readflags(m1n)
        # copy: m2 is mutated below as files are disposed of
        m2 = self.manifest.read(m2n).copy()
        mf2 = self.manifest.readflags(m2n)
        ma = self.manifest.read(man)
        mfa = self.manifest.readflags(man)

        modified, added, removed, deleted, unknown = self.changes()

        # is this a jump, or a merge?  i.e. is there a linear path
        # from p1 to p2?
        linear_path = (pa == p1 or pa == p2)

        if allow and linear_path:
            raise util.Abort(_("there is nothing to merge, "
                               "just use 'hg update'"))
        if allow and not forcemerge:
            if modified or added or removed:
                raise util.Abort(_("outstanding uncommitted changes"))

        # refuse to clobber unknown working-dir files whose content
        # differs from the target revision's version
        if not forcemerge and not force:
            for f in unknown:
                if f in m2:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) != 0:
                        raise util.Abort(_("'%s' already exists in the working"
                                           " dir and differs from remote") % f)

        # resolve the manifest to determine which files
        # we care about merging
        self.ui.note(_("resolving manifests\n"))
        self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
                      (force, allow, moddirstate, linear_path))
        self.ui.debug(_(" ancestor %s local %s remote %s\n") %
                      (short(man), short(m1n), short(m2n)))

        merge = {}      # files needing a 3-way merge
        get = {}        # files to fetch from the target revision
        remove = []     # files to delete from the working dir

        # construct a working dir manifest
        mw = m1.copy()
        mfw = mf1.copy()
        umap = dict.fromkeys(unknown)

        for f in added + modified + unknown:
            mw[f] = ""
            mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

        if moddirstate and not wlock:
            wlock = self.wlock()

        for f in deleted + removed:
            if f in mw:
                del mw[f]

            # If we're jumping between revisions (as opposed to merging),
            # and if neither the working directory nor the target rev has
            # the file, then we need to remove it from the dirstate, to
            # prevent the dirstate from listing the file when it is no
            # longer in the manifest.
            if moddirstate and linear_path and f not in m2:
                self.dirstate.forget((f,))

        # Compare manifests
        for f, n in mw.iteritems():
            if choose and not choose(f):
                continue
            if f in m2:
                s = 0

                # is the wfile new since m1, and match m2?
                if f not in m1:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) == 0:
                        n = m2[f]
                    del t1, t2

                # are files different?
                if n != m2[f]:
                    a = ma.get(f, nullid)
                    # are both different from the ancestor?
                    if n != a and m2[f] != a:
                        self.ui.debug(_(" %s versions differ, resolve\n") % f)
                        # merge executable bits
                        # "if we changed or they changed, change in merge"
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        merge[f] = (m1.get(f, nullid), m2[f], mode)
                        s = 1
                    # are we clobbering?
                    # is remote's version newer?
                    # or are we going back in time?
                    elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                        self.ui.debug(_(" remote %s is newer, get\n") % f)
                        get[f] = m2[f]
                        s = 1
                elif f in umap or f in added:
                    # this unknown file is the same as the checkout
                    # we need to reset the dirstate if the file was added
                    get[f] = m2[f]

                if not s and mfw[f] != mf2[f]:
                    if force:
                        self.ui.debug(_(" updating permissions for %s\n") % f)
                        util.set_exec(self.wjoin(f), mf2[f])
                    else:
                        # same exec-bit merge rule as above
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        if mode != b:
                            self.ui.debug(_(" updating permissions for %s\n")
                                          % f)
                            util.set_exec(self.wjoin(f), mode)
                # handled; remaining m2 entries are files we lack locally
                del m2[f]
            elif f in ma:
                if n != ma[f]:
                    r = _("d")
                    if not force and (linear_path or allow):
                        r = self.ui.prompt(
                            (_(" local changed %s which remote deleted\n") % f) +
                            _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                    if r == _("d"):
                        remove.append(f)
                else:
                    self.ui.debug(_("other deleted %s\n") % f)
                    remove.append(f) # other deleted it
            else:
                # file is created on branch or in working directory
                if force and f not in umap:
                    self.ui.debug(_("remote deleted %s, clobbering\n") % f)
                    remove.append(f)
                elif n == m1.get(f, nullid): # same as parent
                    if p2 == pa: # going backwards?
                        self.ui.debug(_("remote deleted %s\n") % f)
                        remove.append(f)
                    else:
                        self.ui.debug(_("local modified %s, keeping\n") % f)
                else:
                    self.ui.debug(_("working dir created %s, keeping\n") % f)

        # files in the target revision that the working dir does not have
        for f, n in m2.iteritems():
            if choose and not choose(f):
                continue
            if f[0] == "/":
                continue
            if f in ma and n != ma[f]:
                r = _("k")
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        (_("remote changed %s which local deleted\n") % f) +
                        _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                if r == _("k"):
                    get[f] = n
            elif f not in ma:
                self.ui.debug(_("remote created %s\n") % f)
                get[f] = n
            else:
                if force or p2 == pa: # going backwards?
                    self.ui.debug(_("local deleted %s, recreating\n") % f)
                    get[f] = n
                else:
                    self.ui.debug(_("local deleted %s\n") % f)

        del mw, m1, m2, ma

        if force:
            # forced update: take the remote side of every would-be merge
            for f in merge:
                get[f] = merge[f][1]
            merge = {}

        if linear_path or force:
            # we don't need to do any magic, just jump to the new rev
            branch_merge = False
            p1, p2 = p2, nullid
        else:
            if not allow:
                self.ui.status(_("this update spans a branch"
                                 " affecting the following files:\n"))
                fl = merge.keys() + get.keys()
                fl.sort()
                for f in fl:
                    cf = ""
                    if f in merge:
                        cf = _(" (resolve)")
                    self.ui.status(" %s%s\n" % (f, cf))
                self.ui.warn(_("aborting update spanning branches!\n"))
                self.ui.status(_("(use 'hg merge' to merge across branches"
                                 " or 'hg update -C' to lose changes)\n"))
                return 1
            branch_merge = True

        xp1 = hex(p1)
        xp2 = hex(p2)
        if p2 == nullid: xxp2 = ''
        else: xxp2 = xp2

        self.hook('preupdate', throw=True, parent1=xp1, parent2=xxp2)

        # get the files we don't need to change
        files = get.keys()
        files.sort()
        for f in files:
            if f[0] == "/":
                continue
            self.ui.note(_("getting %s\n") % f)
            t = self.file(f).read(get[f])
            self.wwrite(f, t)
            util.set_exec(self.wjoin(f), mf2[f])
            if moddirstate:
                if branch_merge:
                    self.dirstate.update([f], 'n', st_mtime=-1)
                else:
                    self.dirstate.update([f], 'n')

        # merge the tricky bits
        failedmerge = []
        files = merge.keys()
        files.sort()
        for f in files:
            self.ui.status(_("merging %s\n") % f)
            my, other, flag = merge[f]
            ret = self.merge3(f, my, other, xp1, xp2)
            if ret:
                err = True
                failedmerge.append(f)
            util.set_exec(self.wjoin(f), flag)
            if moddirstate:
                if branch_merge:
                    # We've done a branch merge, mark this file as merged
                    # so that we properly record the merger later
                    self.dirstate.update([f], 'm')
                else:
                    # We've update-merged a locally modified file, so
                    # we set the dirstate to emulate a normal checkout
                    # of that file some time in the past. Thus our
                    # merge will appear as a normal local file
                    # modification.
                    f_len = len(self.file(f).read(other))
                    self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)

        remove.sort()
        for f in remove:
            self.ui.note(_("removing %s\n") % f)
            util.audit_path(f)
            try:
                util.unlink(self.wjoin(f))
            except OSError, inst:
                # already gone is fine; report any other failure
                if inst.errno != errno.ENOENT:
                    self.ui.warn(_("update failed to remove %s: %s!\n") %
                                 (f, inst.strerror))
        if moddirstate:
            if branch_merge:
                self.dirstate.update(remove, 'r')
            else:
                self.dirstate.forget(remove)

        if moddirstate:
            self.dirstate.setparents(p1, p2)

        if show_stats:
            stats = ((len(get), _("updated")),
                     (len(merge) - len(failedmerge), _("merged")),
                     (len(remove), _("removed")),
                     (len(failedmerge), _("unresolved")))
            note = ", ".join([_("%d files %s") % s for s in stats])
            self.ui.status("%s\n" % note)
        if moddirstate:
            if branch_merge:
                if failedmerge:
                    self.ui.status(_("There are unresolved merges,"
                                    " you can redo the full merge using:\n"
                                    "  hg update -C %s\n"
                                    "  hg merge %s\n"
                                    % (self.changelog.rev(p1),
                                       self.changelog.rev(p2))))
                else:
                    self.ui.status(_("(branch merge, don't forget to commit)\n"))
            elif failedmerge:
                self.ui.status(_("There are unresolved merges with"
                                 " locally modified files.\n"))

        self.hook('update', parent1=xp1, parent2=xxp2, error=int(err))
        return err
1867 1874
1868 1875 def merge3(self, fn, my, other, p1, p2):
1869 1876 """perform a 3-way merge in the working directory"""
1870 1877
1871 1878 def temp(prefix, node):
1872 1879 pre = "%s~%s." % (os.path.basename(fn), prefix)
1873 1880 (fd, name) = tempfile.mkstemp(prefix=pre)
1874 1881 f = os.fdopen(fd, "wb")
1875 1882 self.wwrite(fn, fl.read(node), f)
1876 1883 f.close()
1877 1884 return name
1878 1885
1879 1886 fl = self.file(fn)
1880 1887 base = fl.ancestor(my, other)
1881 1888 a = self.wjoin(fn)
1882 1889 b = temp("base", base)
1883 1890 c = temp("other", other)
1884 1891
1885 1892 self.ui.note(_("resolving %s\n") % fn)
1886 1893 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1887 1894 (fn, short(my), short(other), short(base)))
1888 1895
1889 1896 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1890 1897 or "hgmerge")
1891 1898 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1892 1899 environ={'HG_FILE': fn,
1893 1900 'HG_MY_NODE': p1,
1894 1901 'HG_OTHER_NODE': p2,
1895 1902 'HG_FILE_MY_NODE': hex(my),
1896 1903 'HG_FILE_OTHER_NODE': hex(other),
1897 1904 'HG_FILE_BASE_NODE': hex(base)})
1898 1905 if r:
1899 1906 self.ui.warn(_("merging %s failed!\n") % fn)
1900 1907
1901 1908 os.unlink(b)
1902 1909 os.unlink(c)
1903 1910 return r
1904 1911
1905 1912 def verify(self):
1906 1913 filelinkrevs = {}
1907 1914 filenodes = {}
1908 1915 changesets = revisions = files = 0
1909 1916 errors = [0]
1910 1917 warnings = [0]
1911 1918 neededmanifests = {}
1912 1919
1913 1920 def err(msg):
1914 1921 self.ui.warn(msg + "\n")
1915 1922 errors[0] += 1
1916 1923
1917 1924 def warn(msg):
1918 1925 self.ui.warn(msg + "\n")
1919 1926 warnings[0] += 1
1920 1927
1921 1928 def checksize(obj, name):
1922 1929 d = obj.checksize()
1923 1930 if d[0]:
1924 1931 err(_("%s data length off by %d bytes") % (name, d[0]))
1925 1932 if d[1]:
1926 1933 err(_("%s index contains %d extra bytes") % (name, d[1]))
1927 1934
1928 1935 def checkversion(obj, name):
1929 1936 if obj.version != revlog.REVLOGV0:
1930 1937 if not revlogv1:
1931 1938 warn(_("warning: `%s' uses revlog format 1") % name)
1932 1939 elif revlogv1:
1933 1940 warn(_("warning: `%s' uses revlog format 0") % name)
1934 1941
1935 1942 revlogv1 = self.revlogversion != revlog.REVLOGV0
1936 1943 if self.ui.verbose or revlogv1 != self.revlogv1:
1937 1944 self.ui.status(_("repository uses revlog format %d\n") %
1938 1945 (revlogv1 and 1 or 0))
1939 1946
1940 1947 seen = {}
1941 1948 self.ui.status(_("checking changesets\n"))
1942 1949 checksize(self.changelog, "changelog")
1943 1950
1944 1951 for i in range(self.changelog.count()):
1945 1952 changesets += 1
1946 1953 n = self.changelog.node(i)
1947 1954 l = self.changelog.linkrev(n)
1948 1955 if l != i:
1949 1956 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1950 1957 if n in seen:
1951 1958 err(_("duplicate changeset at revision %d") % i)
1952 1959 seen[n] = 1
1953 1960
1954 1961 for p in self.changelog.parents(n):
1955 1962 if p not in self.changelog.nodemap:
1956 1963 err(_("changeset %s has unknown parent %s") %
1957 1964 (short(n), short(p)))
1958 1965 try:
1959 1966 changes = self.changelog.read(n)
1960 1967 except KeyboardInterrupt:
1961 1968 self.ui.warn(_("interrupted"))
1962 1969 raise
1963 1970 except Exception, inst:
1964 1971 err(_("unpacking changeset %s: %s") % (short(n), inst))
1965 1972 continue
1966 1973
1967 1974 neededmanifests[changes[0]] = n
1968 1975
1969 1976 for f in changes[3]:
1970 1977 filelinkrevs.setdefault(f, []).append(i)
1971 1978
1972 1979 seen = {}
1973 1980 self.ui.status(_("checking manifests\n"))
1974 1981 checkversion(self.manifest, "manifest")
1975 1982 checksize(self.manifest, "manifest")
1976 1983
1977 1984 for i in range(self.manifest.count()):
1978 1985 n = self.manifest.node(i)
1979 1986 l = self.manifest.linkrev(n)
1980 1987
1981 1988 if l < 0 or l >= self.changelog.count():
1982 1989 err(_("bad manifest link (%d) at revision %d") % (l, i))
1983 1990
1984 1991 if n in neededmanifests:
1985 1992 del neededmanifests[n]
1986 1993
1987 1994 if n in seen:
1988 1995 err(_("duplicate manifest at revision %d") % i)
1989 1996
1990 1997 seen[n] = 1
1991 1998
1992 1999 for p in self.manifest.parents(n):
1993 2000 if p not in self.manifest.nodemap:
1994 2001 err(_("manifest %s has unknown parent %s") %
1995 2002 (short(n), short(p)))
1996 2003
1997 2004 try:
1998 2005 delta = mdiff.patchtext(self.manifest.delta(n))
1999 2006 except KeyboardInterrupt:
2000 2007 self.ui.warn(_("interrupted"))
2001 2008 raise
2002 2009 except Exception, inst:
2003 2010 err(_("unpacking manifest %s: %s") % (short(n), inst))
2004 2011 continue
2005 2012
2006 2013 try:
2007 2014 ff = [ l.split('\0') for l in delta.splitlines() ]
2008 2015 for f, fn in ff:
2009 2016 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
2010 2017 except (ValueError, TypeError), inst:
2011 2018 err(_("broken delta in manifest %s: %s") % (short(n), inst))
2012 2019
2013 2020 self.ui.status(_("crosschecking files in changesets and manifests\n"))
2014 2021
2015 2022 for m, c in neededmanifests.items():
2016 2023 err(_("Changeset %s refers to unknown manifest %s") %
2017 2024 (short(m), short(c)))
2018 2025 del neededmanifests
2019 2026
2020 2027 for f in filenodes:
2021 2028 if f not in filelinkrevs:
2022 2029 err(_("file %s in manifest but not in changesets") % f)
2023 2030
2024 2031 for f in filelinkrevs:
2025 2032 if f not in filenodes:
2026 2033 err(_("file %s in changeset but not in manifest") % f)
2027 2034
2028 2035 self.ui.status(_("checking files\n"))
2029 2036 ff = filenodes.keys()
2030 2037 ff.sort()
2031 2038 for f in ff:
2032 2039 if f == "/dev/null":
2033 2040 continue
2034 2041 files += 1
2035 2042 if not f:
2036 2043 err(_("file without name in manifest %s") % short(n))
2037 2044 continue
2038 2045 fl = self.file(f)
2039 2046 checkversion(fl, f)
2040 2047 checksize(fl, f)
2041 2048
2042 2049 nodes = {nullid: 1}
2043 2050 seen = {}
2044 2051 for i in range(fl.count()):
2045 2052 revisions += 1
2046 2053 n = fl.node(i)
2047 2054
2048 2055 if n in seen:
2049 2056 err(_("%s: duplicate revision %d") % (f, i))
2050 2057 if n not in filenodes[f]:
2051 2058 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
2052 2059 else:
2053 2060 del filenodes[f][n]
2054 2061
2055 2062 flr = fl.linkrev(n)
2056 2063 if flr not in filelinkrevs.get(f, []):
2057 2064 err(_("%s:%s points to unexpected changeset %d")
2058 2065 % (f, short(n), flr))
2059 2066 else:
2060 2067 filelinkrevs[f].remove(flr)
2061 2068
2062 2069 # verify contents
2063 2070 try:
2064 2071 t = fl.read(n)
2065 2072 except KeyboardInterrupt:
2066 2073 self.ui.warn(_("interrupted"))
2067 2074 raise
2068 2075 except Exception, inst:
2069 2076 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
2070 2077
2071 2078 # verify parents
2072 2079 (p1, p2) = fl.parents(n)
2073 2080 if p1 not in nodes:
2074 2081 err(_("file %s:%s unknown parent 1 %s") %
2075 2082 (f, short(n), short(p1)))
2076 2083 if p2 not in nodes:
2077 2084 err(_("file %s:%s unknown parent 2 %s") %
2078 2085 (f, short(n), short(p1)))
2079 2086 nodes[n] = 1
2080 2087
2081 2088 # cross-check
2082 2089 for node in filenodes[f]:
2083 2090 err(_("node %s in manifests not in %s") % (hex(node), f))
2084 2091
2085 2092 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
2086 2093 (files, changesets, revisions))
2087 2094
2088 2095 if warnings[0]:
2089 2096 self.ui.warn(_("%d warnings encountered!\n") % warnings[0])
2090 2097 if errors[0]:
2091 2098 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
2092 2099 return 1
2093 2100
# used to avoid circular references so destructors work
def aftertrans(base):
    """Return a callback that promotes the journal files under `base`
    to undo files once a transaction completes.

    Only the path is captured (not the repository object) so no
    circular reference is created and destructors still run."""
    p = base
    def a():
        for src, dst in (("journal", "undo"),
                         ("journal.dirstate", "undo.dirstate")):
            util.rename(os.path.join(p, src), os.path.join(p, dst))
    return a
2102 2109
@@ -1,176 +1,189
1 1 # manifest.py - manifest revision class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import struct
9 9 from revlog import *
10 10 from i18n import gettext as _
11 11 from demandload import *
12 12 demandload(globals(), "bisect array")
13 13
class manifest(revlog):
    """revlog storing manifest revisions

    Each manifest revision is a sorted text of lines of the form
    "filename\\0<40 hex node chars>[x]\\n", where a trailing "x" marks
    the file as executable."""
    def __init__(self, opener, defversion=REVLOGV0):
        # (node, map, flags) of the most recently read revision
        self.mapcache = None
        # raw text of the last read/added revision, as an array of chars
        self.listcache = None
        revlog.__init__(self, opener, "00manifest.i", "00manifest.d",
                        defversion)

    def read(self, node):
        """return {filename: binary filenode} for manifest `node`,
        caching (node, map, flags) for subsequent lookups"""
        if node == nullid: return {} # don't upset local cache
        if self.mapcache and self.mapcache[0] == node:
            return self.mapcache[1]
        text = self.revision(node)
        map = {}
        flag = {}
        self.listcache = array.array('c', text)
        lines = text.splitlines(1)
        for l in lines:
            # line layout: filename, NUL, 40 hex chars, optional "x", newline
            (f, n) = l.split('\0')
            map[f] = bin(n[:40])
            flag[f] = (n[40:-1] == "x")
        self.mapcache = (node, map, flag)
        return map

    def readflags(self, node):
        """return {filename: bool exec flag} for manifest `node`;
        populates the cache via read() if needed"""
        if node == nullid: return {} # don't upset local cache
        if not self.mapcache or self.mapcache[0] != node:
            self.read(node)
        return self.mapcache[2]

    def diff(self, a, b):
        # plain textdiff of two manifest texts
        return mdiff.textdiff(str(a), str(b))

    def _search(self, m, s, lo=0, hi=None):
        '''return a tuple (start, end) that says where to find s within m.

        If the string is found m[start:end] are the line containing
        that string.  If start == end the string was not found and
        they indicate the proper sorted insertion point.  This was
        taken from bisect_left, and modified to find line start/end as
        it goes along.

        m should be a buffer or a string
        s is a string'''
        def advance(i, c):
            # move i forward to the next occurrence of character c
            while i < lenm and m[i] != c:
                i += 1
            return i
        lenm = len(m)
        if not hi:
            hi = lenm
        while lo < hi:
            mid = (lo + hi) // 2
            start = mid
            # back up to the start of the line containing mid
            while start > 0 and m[start-1] != '\n':
                start -= 1
            end = advance(start, '\0')
            if m[start:end] < s:
                # we know that after the null there are 40 bytes of sha1
                # this translates to the bisect lo = mid + 1
                lo = advance(end + 40, '\n') + 1
            else:
                # this translates to the bisect hi = mid
                hi = start
        end = advance(lo, '\0')
        found = m[lo:end]
        if cmp(s, found) == 0:
            # we know that after the null there are 40 bytes of sha1
            end = advance(end + 40, '\n')
            return (lo, end+1)
        else:
            return (lo, lo)

    def find(self, node, f):
        '''look up entry for a single file efficiently.
        return (node, flag) pair if found, (None, None) if not.'''
        if self.mapcache and node == self.mapcache[0]:
            return self.mapcache[1].get(f), self.mapcache[2].get(f)
        text = self.revision(node)
        start, end = self._search(text, f)
        if start == end:
            return None, None
        l = text[start:end]
        f, n = l.split('\0')
        return bin(n[:40]), n[40:-1] == 'x'

    def add(self, map, flags, transaction, link, p1=None, p2=None,
            changed=None):
        """add a new manifest revision built from `map` ({filename:
        filenode}) and `flags` ({filename: exec flag}); returns the new
        manifest node.  When `changed` (an (added/changed, removed)
        pair of filename lists) is given and the listcache is valid for
        p1, the text is patched incrementally instead of rebuilt."""
        # apply the changes collected during the bisect loop to our addlist
        # return a delta suitable for addrevision
        def addlistdelta(addlist, x):
            # start from the bottom up
            # so changes to the offsets don't mess things up.
            i = len(x)
            while i > 0:
                i -= 1
                start = x[i][0]
                end = x[i][1]
                if x[i][2]:
                    addlist[start:end] = array.array('c', x[i][2])
                else:
                    del addlist[start:end]
            return "".join([struct.pack(">lll", d[0], d[1], len(d[2])) + d[2] \
                            for d in x ])

        # if we're using the listcache, make sure it is valid and
        # parented by the same node we're diffing against
        if not changed or not self.listcache or not p1 or \
               self.mapcache[0] != p1:
            # slow path: rebuild the whole manifest text from scratch
            files = map.keys()
            files.sort()

            # if this is changed to support newlines in filenames,
            # be sure to check the templates/ dir again (especially *-raw.tmpl)
            text = ["%s\000%s%s\n" %
                            (f, hex(map[f]), flags[f] and "x" or '')
                    for f in files]
            self.listcache = array.array('c', "".join(text))
            cachedelta = None
        else:
            # fast path: patch the cached text in place
            addlist = self.listcache

            # combine the changed lists into one list for sorting
            # (second element: 0 = add/update, 1 = delete)
            work = [[x, 0] for x in changed[0]]
            work[len(work):] = [[x, 1] for x in changed[1]]
            work.sort()

            delta = []
            dstart = None
            dend = None
            dline = [""]
            start = 0
            # zero copy representation of addlist as a buffer
            addbuf = buffer(addlist)

            # start with a readonly loop that finds the offset of
            # each line and creates the deltas
            for w in work:
                f = w[0]
                # bs will either be the index of the item or the insert point
                start, end = self._search(addbuf, f, start)
                if w[1] == 0:
                    l = "%s\000%s%s\n" % (f, hex(map[f]),
                                          flags[f] and "x" or '')
                else:
                    l = ""
                if start == end and w[1] == 1:
                    # item we want to delete was not found, error out
                    raise AssertionError(
                            _("failed to remove %s from manifest\n") % f)
                if dstart != None and dstart <= start and dend >= start:
                    # adjacent/overlapping change: extend the pending delta
                    if dend < end:
                        dend = end
                    if l:
                        dline.append(l)
                else:
                    if dstart != None:
                        delta.append([dstart, dend, "".join(dline)])
                    dstart = start
                    dend = end
                    dline = [l]

            if dstart != None:
                delta.append([dstart, dend, "".join(dline)])
            # apply the delta to the addlist, and get a delta for addrevision
            cachedelta = addlistdelta(addlist, delta)

            # the delta is only valid if we've been processing the tip revision
            if self.mapcache[0] != self.tip():
                cachedelta = None
            self.listcache = addlist

        n = self.addrevision(buffer(self.listcache), transaction, link, p1, \
                             p2, cachedelta)
        self.mapcache = (n, map, flags)

        return n
@@ -1,43 +1,62
#!/bin/sh

# basic tag creation, lookup and identify behaviour
mkdir t
cd t
hg init
hg id
echo a > a
hg add a
hg commit -m "test" -d "1000000 0"
hg co
hg identify
T=`hg tip -v | head -n 1 | cut -d : -f 3`
echo "$T first" > .hgtags
cat .hgtags
hg add .hgtags
hg commit -m "add tags" -d "1000000 0"
hg tags
hg identify
echo bb > a
hg status
hg identify
hg co first
hg id
hg -v id
hg status
echo 1 > b
hg add b
hg commit -m "branch" -d "1000000 0"
hg id
hg merge 1
hg id
hg status

hg commit -m "merge" -d "1000000 0"

# create fake head, make sure tag not visible afterwards
cp .hgtags tags
hg tag -d "1000000 0" last
hg rm .hgtags
hg commit -m "remove" -d "1000000 0"

mv tags .hgtags
hg add .hgtags
hg commit -m "readd" -d "1000000 0"

hg tags

# invalid tags
echo "spam" >> .hgtags
echo >> .hgtags
echo "foo bar" >> .hgtags
echo "$T invalid" | sed "s/..../a5a5/" >> .hg/localtags
hg commit -m "tags" -d "1000000 0"

# report tag parse error on other head
hg up 3
echo 'x y' >> .hgtags
hg commit -m "head" -d "1000000 0"

hg tags
hg tip
@@ -1,32 +1,41
1 1 unknown
2 2 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
3 3 0acdaf898367 tip
4 4 0acdaf8983679e0aac16e811534eb49d7ee1f2b4 first
5 5 tip 1:8a3ca90d111dc784e6575d373105be12570e8776
6 6 first 0:0acdaf8983679e0aac16e811534eb49d7ee1f2b4
7 7 8a3ca90d111d tip
8 8 M a
9 9 8a3ca90d111d+ tip
10 10 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
11 11 0acdaf898367+ first
12 12 0acdaf8983679e0aac16e811534eb49d7ee1f2b4+ first
13 13 M a
14 14 8216907a933d tip
15 15 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
16 16 (branch merge, don't forget to commit)
17 17 8216907a933d+8a3ca90d111d+ tip
18 18 M .hgtags
19 .hgtags:2: ignoring invalid tag
20 .hgtags:4: ignoring invalid tag
21 localtags:1: ignoring invalid tag
22 tip 4:fd868a874787a7b5af31e1675666ce691c803035
19 tip 6:c6af9d771a81bb9c7f267ec03491224a9f8ba1cd
23 20 first 0:0acdaf8983679e0aac16e811534eb49d7ee1f2b4
24 changeset: 4:fd868a874787
25 .hgtags:2: ignoring invalid tag
26 .hgtags:4: ignoring invalid tag
27 localtags:1: ignoring invalid tag
21 .hgtags (rev 7:39bba1bbbc4c), line 2: cannot parse entry
22 .hgtags (rev 7:39bba1bbbc4c), line 4: node 'foo' is not well formed
23 localtags, line 1: tag 'invalid' refers to unknown node
24 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
25 .hgtags (rev 7:39bba1bbbc4c), line 2: cannot parse entry
26 .hgtags (rev 7:39bba1bbbc4c), line 4: node 'foo' is not well formed
27 .hgtags (rev 8:4ca6f1b1a68c), line 2: node 'x' is not well formed
28 localtags, line 1: tag 'invalid' refers to unknown node
29 tip 8:4ca6f1b1a68c77be687a03aaeb1614671ba59b20
30 first 0:0acdaf8983679e0aac16e811534eb49d7ee1f2b4
31 changeset: 8:4ca6f1b1a68c
32 .hgtags (rev 7:39bba1bbbc4c), line 2: cannot parse entry
33 .hgtags (rev 7:39bba1bbbc4c), line 4: node 'foo' is not well formed
34 .hgtags (rev 8:4ca6f1b1a68c), line 2: node 'x' is not well formed
35 localtags, line 1: tag 'invalid' refers to unknown node
28 36 tag: tip
37 parent: 3:b2ef3841386b
29 38 user: test
30 39 date: Mon Jan 12 13:46:40 1970 +0000
31 summary: tags
40 summary: head
32 41
General Comments 0
You need to be logged in to leave comments. Login now