##// END OF EJS Templates
# context: add method to return all bookmarks pointing to a node
# David Soria Parra -
# r13384:caa56175 default
# parent child Browse files
# Show More
# @@ -1,1103 +1,1105
# context.py - changeset and file context objects for mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import nullid, nullrev, short, hex
from i18n import _
import ancestor, bdiff, error, util, subrepo, patch, encoding
import os, errno, stat

propertycache = util.propertycache
class changectx(object):
    """A changecontext object makes access to data related to a particular
    changeset convenient."""

    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag ('' means '.')"""
        if changeid == '':
            changeid = '.'
        self._repo = repo
        if isinstance(changeid, (long, int)):
            # numeric revision: map it to its node
            self._rev = changeid
            self._node = self._repo.changelog.node(changeid)
        else:
            # node, tag, or other symbolic name: resolve via the repo
            self._node = self._repo.lookup(changeid)
            self._rev = self._repo.changelog.rev(self._node)

    def __str__(self):
        return short(self.node())

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return "<changectx %s>" % str(self)

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            # subclasses (e.g. workingctx) may lack _rev; fall back to identity
            return id(self)

    def __eq__(self, other):
        try:
            return self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __nonzero__(self):
        # the null revision is falsy
        return self._rev != nullrev

    @propertycache
    def _changeset(self):
        # raw changelog entry tuple for this node
        return self._repo.changelog.read(self.node())

    @propertycache
    def _manifest(self):
        return self._repo.manifest.read(self._changeset[0])

    @propertycache
    def _manifestdelta(self):
        return self._repo.manifest.readdelta(self._changeset[0])

    @propertycache
    def _parents(self):
        p = self._repo.changelog.parentrevs(self._rev)
        if p[1] == nullrev:
            # single parent: drop the trailing null
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    @propertycache
    def substate(self):
        # subrepo state (path -> (source, node)) for this changeset
        return subrepo.state(self, self._repo.ui)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        # iterate file names in sorted order
        for f in sorted(self._manifest):
            yield f

    def changeset(self):
        return self._changeset
    def manifest(self):
        return self._manifest
    def manifestnode(self):
        return self._changeset[0]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self._node)
    def user(self):
        return self._changeset[1]
    def date(self):
        return self._changeset[2]
    def files(self):
        return self._changeset[3]
    def description(self):
        return self._changeset[4]
    def branch(self):
        return encoding.tolocal(self._changeset[5].get("branch"))
    def extra(self):
        return self._changeset[5]
    def tags(self):
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        """return all bookmarks pointing to this changeset's node"""
        return self._repo.nodebookmarks(self._node)

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        if len(self._parents) == 2:
            return self._parents[1]
        # no second parent: return the null context
        return changectx(self._repo, -1)

    def children(self):
        """return contexts for each child changeset"""
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors(self._rev):
            yield changectx(self._repo, a)

    def descendants(self):
        for d in self._repo.changelog.descendants(self._rev):
            yield changectx(self._repo, d)

    def _fileinfo(self, path):
        """return (filenode, flags) for path, raising LookupError if absent"""
        if '_manifest' in self.__dict__:
            # full manifest already loaded: use it directly
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.LookupError(self._node, path,
                                        _('not found in manifest'))
        if '_manifestdelta' in self.__dict__ or path in self.files():
            # cheaper: consult only the manifest delta for this changeset
            if path in self._manifestdelta:
                return self._manifestdelta[path], self._manifestdelta.flags(path)
        node, flag = self._repo.manifest.find(self._changeset[0], path)
        if not node:
            raise error.LookupError(self._node, path,
                                    _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            # file missing from this changeset: no flags
            return ''

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2):
        """
        return the ancestor context of self and c2
        """
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        n = self._repo.changelog.ancestor(self._node, n2)
        return changectx(self._repo, n)

    def walk(self, match):
        """yield filenames in this changeset matched by the matcher"""
        fset = set(match.files())
        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fset.discard('.')
        for fn in self:
            for ffn in fset:
                # match if the file is the exact name or a directory
                if ffn == fn or fn.startswith("%s/" % ffn):
                    fset.remove(ffn)
                    break
            if match(fn):
                yield fn
        # report explicitly-requested files not present in this revision
        for fn in sorted(fset):
            if match.bad(fn, _('no such file in rev %s') % self) and match(fn):
                yield fn

    def sub(self, path):
        return subrepo.subrepo(self, path)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None and not isinstance(ctx2, changectx):
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2.node(), self.node(),
                          match=match, opts=diffopts)
class filectx(object):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of locating the file revision must be given
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        return changectx(self._repo, self._changeid)

    @propertycache
    def _filelog(self):
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if '_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    def __str__(self):
        return "%s@%s" % (self.path(), short(self.node()))

    def __repr__(self):
        return "<filectx %s>" % str(self)

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filectx(self, fileid):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog)

    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    def flags(self):
        return self._changectx.flags(self._path)
    def filelog(self):
        return self._filelog

    def rev(self):
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        if '_changeid' in self.__dict__:
            # NOTE(review): this reads _changectx (materializing it from
            # _changeid via the propertycache), not _changeid directly --
            # presumably intentional; verify before changing.
            return self._changectx.rev()
        return self._filelog.linkrev(self._filerev)

    def linkrev(self):
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return hex(self.node())
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx

    def data(self):
        return self._filelog.read(self._filenode)
    def path(self):
        return self._path
    def size(self):
        return self._filelog.size(self._filerev)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if (fctx._filerev is None and self._repo._encodefilterpats
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True

    def renamed(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def parents(self):
        """return parent filectxs, following the rename source if any"""
        p = self._path
        fl = self._filelog
        pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]

        r = self._filelog.renamed(self._filenode)
        if r:
            # first parent comes from the rename source (filelog unknown)
            pl[0] = (r[0], r[1], None)

        return [filectx(self._repo, p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]

    def annotate(self, follow=False, linenumber=None):
        '''returns a list of tuples of (ctx, line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed.
        This returns tuples of ((ctx, linenumber), line) for each line,
        if "linenumber" parameter is NOT "None".
        In such tuples, linenumber means one at the first appearance
        in the managed file.
        To reduce annotation cost,
        this returns fixed value(False is used) as linenumber,
        if "linenumber" parameter is "False".'''

        def decorate_compat(text, rev):
            return ([rev] * len(text.splitlines()), text)

        def without_linenumber(text, rev):
            return ([(rev, False)] * len(text.splitlines()), text)

        def with_linenumber(text, rev):
            size = len(text.splitlines())
            return ([(rev, i) for i in xrange(1, size + 1)], text)

        # choose the line decorator based on the linenumber parameter
        decorate = (((linenumber is None) and decorate_compat) or
                    (linenumber and with_linenumber) or
                    without_linenumber)

        def pair(parent, child):
            # propagate parent's annotations onto unchanged lines of child
            for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
                child[0][b1:b2] = parent[0][a1:a2]
            return child

        getlog = util.lrucachefunc(lambda x: self._repo.file(x))
        def getctx(path, fileid):
            log = path == self._path and self._filelog or getlog(path)
            return filectx(self._repo, path, fileid=fileid, filelog=log)
        getctx = util.lrucachefunc(getctx)

        def parents(f):
            # we want to reuse filectx objects as much as possible
            p = f._path
            if f._filerev is None: # working dir
                pl = [(n.path(), n.filerev()) for n in f.parents()]
            else:
                pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)]

            if follow:
                r = f.renamed()
                if r:
                    pl[0] = (r[0], getlog(r[0]).rev(r[1]))

            return [getctx(p, n) for p, n in pl if n != nullrev]

        # use linkrev to find the first changeset where self appeared
        if self.rev() != self.linkrev():
            base = self.filectx(self.filerev())
        else:
            base = self

        # find all ancestors
        needed = {base: 1}
        visit = [base]
        files = [base._path]
        while visit:
            f = visit.pop(0)
            for p in parents(f):
                if p not in needed:
                    needed[p] = 1
                    visit.append(p)
                    if p._path not in files:
                        files.append(p._path)
                else:
                    # count how many times we'll use this
                    needed[p] += 1

        # sort by revision (per file) which is a topological order
        visit = []
        for f in files:
            visit.extend(n for n in needed if n._path == f)

        hist = {}
        for f in sorted(visit, key=lambda x: x.rev()):
            curr = decorate(f.data(), f)
            for p in parents(f):
                curr = pair(hist[p], curr)
                # trim the history of unneeded revs
                needed[p] -= 1
                if not needed[p]:
                    del hist[p]
            hist[f] = curr

        return zip(hist[f][0], hist[f][1].splitlines(True))

    def ancestor(self, fc2, actx=None):
        """
        find the common ancestor file context, if any, of self, and fc2

        If actx is given, it must be the changectx of the common ancestor
        of self's and fc2's respective changesets.
        """

        if actx is None:
            actx = self.changectx().ancestor(fc2.changectx())

        # the trivial case: changesets are unrelated, files must be too
        if not actx:
            return None

        # the easy case: no (relevant) renames
        if fc2.path() == self.path() and self.path() in actx:
            return actx[self.path()]
        acache = {}

        # prime the ancestor cache for the working directory
        for c in (self, fc2):
            if c._filerev is None:
                pl = [(n.path(), n.filenode()) for n in c.parents()]
                acache[(c._path, None)] = pl

        flcache = {self._repopath:self._filelog, fc2._repopath:fc2._filelog}
        def parents(vertex):
            if vertex in acache:
                return acache[vertex]
            f, n = vertex
            if f not in flcache:
                flcache[f] = self._repo.file(f)
            fl = flcache[f]
            pl = [(f, p) for p in fl.parents(n) if p != nullid]
            re = fl.renamed(n)
            if re:
                pl.append(re)
            acache[vertex] = pl
            return pl

        a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
        v = ancestor.ancestor(a, b, parents)
        if v:
            f, n = v
            return filectx(self._repo, f, fileid=n, filelog=flcache[f])

        return None

    def ancestors(self):
        """yield each ancestor filectx, breadth-first, without duplicates"""
        # BUGFIX: set(str(self)) built a set of the *characters* of the
        # string; wrap in a list so the whole string is a single element.
        seen = set([str(self)])
        visit = [self]
        while visit:
            for parent in visit.pop(0).parents():
                s = str(parent)
                if s not in seen:
                    visit.append(parent)
                    seen.add(s)
                    yield parent
561 563 class workingctx(changectx):
562 564 """A workingctx object makes access to data related to
563 565 the current working directory convenient.
564 566 date - any valid date string or (unixtime, offset), or None.
565 567 user - username string, or None.
566 568 extra - a dictionary of extra values, or None.
567 569 changes - a list of file lists as returned by localrepo.status()
568 570 or None to use the repository status.
569 571 """
570 572 def __init__(self, repo, text="", user=None, date=None, extra=None,
571 573 changes=None):
572 574 self._repo = repo
573 575 self._rev = None
574 576 self._node = None
575 577 self._text = text
576 578 if date:
577 579 self._date = util.parsedate(date)
578 580 if user:
579 581 self._user = user
580 582 if changes:
581 583 self._status = list(changes[:4])
582 584 self._unknown = changes[4]
583 585 self._ignored = changes[5]
584 586 self._clean = changes[6]
585 587 else:
586 588 self._unknown = None
587 589 self._ignored = None
588 590 self._clean = None
589 591
590 592 self._extra = {}
591 593 if extra:
592 594 self._extra = extra.copy()
593 595 if 'branch' not in self._extra:
594 596 try:
595 597 branch = encoding.fromlocal(self._repo.dirstate.branch())
596 598 except UnicodeDecodeError:
597 599 raise util.Abort(_('branch name not in UTF-8!'))
598 600 self._extra['branch'] = branch
599 601 if self._extra['branch'] == '':
600 602 self._extra['branch'] = 'default'
601 603
602 604 def __str__(self):
603 605 return str(self._parents[0]) + "+"
604 606
605 607 def __repr__(self):
606 608 return "<workingctx %s>" % str(self)
607 609
608 610 def __nonzero__(self):
609 611 return True
610 612
611 613 def __contains__(self, key):
612 614 return self._repo.dirstate[key] not in "?r"
613 615
614 616 @propertycache
615 617 def _manifest(self):
616 618 """generate a manifest corresponding to the working directory"""
617 619
618 620 if self._unknown is None:
619 621 self.status(unknown=True)
620 622
621 623 man = self._parents[0].manifest().copy()
622 624 copied = self._repo.dirstate.copies()
623 625 if len(self._parents) > 1:
624 626 man2 = self.p2().manifest()
625 627 def getman(f):
626 628 if f in man:
627 629 return man
628 630 return man2
629 631 else:
630 632 getman = lambda f: man
631 633 def cf(f):
632 634 f = copied.get(f, f)
633 635 return getman(f).flags(f)
634 636 ff = self._repo.dirstate.flagfunc(cf)
635 637 modified, added, removed, deleted = self._status
636 638 unknown = self._unknown
637 639 for i, l in (("a", added), ("m", modified), ("u", unknown)):
638 640 for f in l:
639 641 orig = copied.get(f, f)
640 642 man[f] = getman(orig).get(orig, nullid) + i
641 643 try:
642 644 man.set(f, ff(f))
643 645 except OSError:
644 646 pass
645 647
646 648 for f in deleted + removed:
647 649 if f in man:
648 650 del man[f]
649 651
650 652 return man
651 653
652 654 @propertycache
653 655 def _status(self):
654 656 return self._repo.status()[:4]
655 657
656 658 @propertycache
657 659 def _user(self):
658 660 return self._repo.ui.username()
659 661
660 662 @propertycache
661 663 def _date(self):
662 664 return util.makedate()
663 665
664 666 @propertycache
665 667 def _parents(self):
666 668 p = self._repo.dirstate.parents()
667 669 if p[1] == nullid:
668 670 p = p[:-1]
669 671 self._parents = [changectx(self._repo, x) for x in p]
670 672 return self._parents
671 673
672 674 def status(self, ignored=False, clean=False, unknown=False):
673 675 """Explicit status query
674 676 Unless this method is used to query the working copy status, the
675 677 _status property will implicitly read the status using its default
676 678 arguments."""
677 679 stat = self._repo.status(ignored=ignored, clean=clean, unknown=unknown)
678 680 self._unknown = self._ignored = self._clean = None
679 681 if unknown:
680 682 self._unknown = stat[4]
681 683 if ignored:
682 684 self._ignored = stat[5]
683 685 if clean:
684 686 self._clean = stat[6]
685 687 self._status = stat[:4]
686 688 return stat
687 689
688 690 def manifest(self):
689 691 return self._manifest
690 692 def user(self):
691 693 return self._user or self._repo.ui.username()
692 694 def date(self):
693 695 return self._date
694 696 def description(self):
695 697 return self._text
696 698 def files(self):
697 699 return sorted(self._status[0] + self._status[1] + self._status[2])
698 700
699 701 def modified(self):
700 702 return self._status[0]
701 703 def added(self):
702 704 return self._status[1]
703 705 def removed(self):
704 706 return self._status[2]
705 707 def deleted(self):
706 708 return self._status[3]
707 709 def unknown(self):
708 710 assert self._unknown is not None # must call status first
709 711 return self._unknown
710 712 def ignored(self):
711 713 assert self._ignored is not None # must call status first
712 714 return self._ignored
713 715 def clean(self):
714 716 assert self._clean is not None # must call status first
715 717 return self._clean
716 718 def branch(self):
717 719 return encoding.tolocal(self._extra['branch'])
718 720 def extra(self):
719 721 return self._extra
720 722
721 723 def tags(self):
722 724 t = []
723 725 [t.extend(p.tags()) for p in self.parents()]
724 726 return t
725 727
726 728 def children(self):
727 729 return []
728 730
729 731 def flags(self, path):
730 732 if '_manifest' in self.__dict__:
731 733 try:
732 734 return self._manifest.flags(path)
733 735 except KeyError:
734 736 return ''
735 737
736 738 orig = self._repo.dirstate.copies().get(path, path)
737 739
738 740 def findflag(ctx):
739 741 mnode = ctx.changeset()[0]
740 742 node, flag = self._repo.manifest.find(mnode, orig)
741 743 ff = self._repo.dirstate.flagfunc(lambda x: flag or '')
742 744 try:
743 745 return ff(path)
744 746 except OSError:
745 747 pass
746 748
747 749 flag = findflag(self._parents[0])
748 750 if flag is None and len(self.parents()) > 1:
749 751 flag = findflag(self._parents[1])
750 752 if flag is None or self._repo.dirstate[path] == 'r':
751 753 return ''
752 754 return flag
753 755
754 756 def filectx(self, path, filelog=None):
755 757 """get a file context from the working directory"""
756 758 return workingfilectx(self._repo, path, workingctx=self,
757 759 filelog=filelog)
758 760
759 761 def ancestor(self, c2):
760 762 """return the ancestor context of self and c2"""
761 763 return self._parents[0].ancestor(c2) # punt on two parents for now
762 764
763 765 def walk(self, match):
764 766 return sorted(self._repo.dirstate.walk(match, self.substate.keys(),
765 767 True, False))
766 768
767 769 def dirty(self, missing=False):
768 770 "check whether a working directory is modified"
769 771 # check subrepos first
770 772 for s in self.substate:
771 773 if self.sub(s).dirty():
772 774 return True
773 775 # check current working dir
774 776 return (self.p2() or self.branch() != self.p1().branch() or
775 777 self.modified() or self.added() or self.removed() or
776 778 (missing and self.deleted()))
777 779
778 780 def add(self, list, prefix=""):
779 781 join = lambda f: os.path.join(prefix, f)
780 782 wlock = self._repo.wlock()
781 783 ui, ds = self._repo.ui, self._repo.dirstate
782 784 try:
783 785 rejected = []
784 786 for f in list:
785 787 p = self._repo.wjoin(f)
786 788 try:
787 789 st = os.lstat(p)
788 790 except:
789 791 ui.warn(_("%s does not exist!\n") % join(f))
790 792 rejected.append(f)
791 793 continue
792 794 if st.st_size > 10000000:
793 795 ui.warn(_("%s: up to %d MB of RAM may be required "
794 796 "to manage this file\n"
795 797 "(use 'hg revert %s' to cancel the "
796 798 "pending addition)\n")
797 799 % (f, 3 * st.st_size // 1000000, join(f)))
798 800 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
799 801 ui.warn(_("%s not added: only files and symlinks "
800 802 "supported currently\n") % join(f))
801 803 rejected.append(p)
802 804 elif ds[f] in 'amn':
803 805 ui.warn(_("%s already tracked!\n") % join(f))
804 806 elif ds[f] == 'r':
805 807 ds.normallookup(f)
806 808 else:
807 809 ds.add(f)
808 810 return rejected
809 811 finally:
810 812 wlock.release()
811 813
812 814 def forget(self, list):
813 815 wlock = self._repo.wlock()
814 816 try:
815 817 for f in list:
816 818 if self._repo.dirstate[f] != 'a':
817 819 self._repo.ui.warn(_("%s not added!\n") % f)
818 820 else:
819 821 self._repo.dirstate.forget(f)
820 822 finally:
821 823 wlock.release()
822 824
823 825 def ancestors(self):
824 826 for a in self._repo.changelog.ancestors(
825 827 *[p.rev() for p in self._parents]):
826 828 yield changectx(self._repo, a)
827 829
828 830 def remove(self, list, unlink=False):
829 831 if unlink:
830 832 for f in list:
831 833 try:
832 834 util.unlinkpath(self._repo.wjoin(f))
833 835 except OSError, inst:
834 836 if inst.errno != errno.ENOENT:
835 837 raise
836 838 wlock = self._repo.wlock()
837 839 try:
838 840 for f in list:
839 841 if unlink and os.path.lexists(self._repo.wjoin(f)):
840 842 self._repo.ui.warn(_("%s still exists!\n") % f)
841 843 elif self._repo.dirstate[f] == 'a':
842 844 self._repo.dirstate.forget(f)
843 845 elif f not in self._repo.dirstate:
844 846 self._repo.ui.warn(_("%s not tracked!\n") % f)
845 847 else:
846 848 self._repo.dirstate.remove(f)
847 849 finally:
848 850 wlock.release()
849 851
850 852 def undelete(self, list):
851 853 pctxs = self.parents()
852 854 wlock = self._repo.wlock()
853 855 try:
854 856 for f in list:
855 857 if self._repo.dirstate[f] != 'r':
856 858 self._repo.ui.warn(_("%s not removed!\n") % f)
857 859 else:
858 860 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
859 861 t = fctx.data()
860 862 self._repo.wwrite(f, t, fctx.flags())
861 863 self._repo.dirstate.normal(f)
862 864 finally:
863 865 wlock.release()
864 866
865 867 def copy(self, source, dest):
866 868 p = self._repo.wjoin(dest)
867 869 if not os.path.lexists(p):
868 870 self._repo.ui.warn(_("%s does not exist!\n") % dest)
869 871 elif not (os.path.isfile(p) or os.path.islink(p)):
870 872 self._repo.ui.warn(_("copy failed: %s is not a file or a "
871 873 "symbolic link\n") % dest)
872 874 else:
873 875 wlock = self._repo.wlock()
874 876 try:
875 877 if self._repo.dirstate[dest] in '?r':
876 878 self._repo.dirstate.add(dest)
877 879 self._repo.dirstate.copy(source, dest)
878 880 finally:
879 881 wlock.release()
880 882
881 883 class workingfilectx(filectx):
882 884 """A workingfilectx object makes access to data related to a particular
883 885 file in the working directory convenient."""
884 886 def __init__(self, repo, path, filelog=None, workingctx=None):
885 887 """changeid can be a changeset revision, node, or tag.
886 888 fileid can be a file revision or node."""
887 889 self._repo = repo
888 890 self._path = path
889 891 self._changeid = None
890 892 self._filerev = self._filenode = None
891 893
892 894 if filelog:
893 895 self._filelog = filelog
894 896 if workingctx:
895 897 self._changectx = workingctx
896 898
897 899 @propertycache
898 900 def _changectx(self):
899 901 return workingctx(self._repo)
900 902
901 903 def __nonzero__(self):
902 904 return True
903 905
904 906 def __str__(self):
905 907 return "%s@%s" % (self.path(), self._changectx)
906 908
907 909 def __repr__(self):
908 910 return "<workingfilectx %s>" % str(self)
909 911
910 912 def data(self):
911 913 return self._repo.wread(self._path)
912 914 def renamed(self):
913 915 rp = self._repo.dirstate.copied(self._path)
914 916 if not rp:
915 917 return None
916 918 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
917 919
918 920 def parents(self):
919 921 '''return parent filectxs, following copies if necessary'''
920 922 def filenode(ctx, path):
921 923 return ctx._manifest.get(path, nullid)
922 924
923 925 path = self._path
924 926 fl = self._filelog
925 927 pcl = self._changectx._parents
926 928 renamed = self.renamed()
927 929
928 930 if renamed:
929 931 pl = [renamed + (None,)]
930 932 else:
931 933 pl = [(path, filenode(pcl[0], path), fl)]
932 934
933 935 for pc in pcl[1:]:
934 936 pl.append((path, filenode(pc, path), fl))
935 937
936 938 return [filectx(self._repo, p, fileid=n, filelog=l)
937 939 for p, n, l in pl if n != nullid]
938 940
939 941 def children(self):
940 942 return []
941 943
942 944 def size(self):
943 945 return os.lstat(self._repo.wjoin(self._path)).st_size
944 946 def date(self):
945 947 t, tz = self._changectx.date()
946 948 try:
947 949 return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz)
948 950 except OSError, err:
949 951 if err.errno != errno.ENOENT:
950 952 raise
951 953 return (t, tz)
952 954
953 955 def cmp(self, fctx):
954 956 """compare with other file context
955 957
956 958 returns True if different than fctx.
957 959 """
958 960 # fctx should be a filectx (not a wfctx)
959 961 # invert comparison to reuse the same code path
960 962 return fctx.cmp(self)
961 963
class memctx(object):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn raises an
    IOError. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """
    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        self._date = date and util.parsedate(date) or util.makedate()
        self._user = user
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        # status slots: modified, added, removed, deleted, unknown,
        # ignored, clean. All seven slots are present so ignored()
        # and clean() (which index slots 5 and 6) return [] instead
        # of raising IndexError.
        self._status = [files, [], [], [], [], [], []]
        self._filectxfn = filectxfn

        self._extra = extra and extra.copy() or {}
        if 'branch' not in self._extra:
            self._extra['branch'] = 'default'
        elif self._extra.get('branch') == '':
            self._extra['branch'] = 'default'

    def __str__(self):
        return str(self._parents[0]) + "+"

    def __int__(self):
        return self._rev

    def __nonzero__(self):
        # an in-memory commit always represents a real (pending) revision
        return True

    def __getitem__(self, key):
        return self.filectx(key)

    def p1(self):
        return self._parents[0]
    def p2(self):
        return self._parents[1]

    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return self.modified()
    def modified(self):
        return self._status[0]
    def added(self):
        return self._status[1]
    def removed(self):
        return self._status[2]
    def deleted(self):
        return self._status[3]
    def unknown(self):
        return self._status[4]
    def ignored(self):
        return self._status[5]
    def clean(self):
        return self._status[6]
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def extra(self):
        return self._extra
    def flags(self, f):
        return self[f].flags()

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)
1067 1069
class memfilectx(object):
    """An in-memory file revision, handed to memctx by its filectxfn.

    See memctx for more details.
    """
    def __init__(self, path, data, islink=False, isexec=False, copied=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        self._path = path
        self._data = data
        # encode the mode bits the same way manifests do: 'l' for a
        # symlink, 'x' for an executable file
        flags = ''
        if islink:
            flags += 'l'
        if isexec:
            flags += 'x'
        self._flags = flags
        if copied:
            self._copied = (copied, nullid)
        else:
            self._copied = None

    def __nonzero__(self):
        return True

    def __str__(self):
        return "%s@%s" % (self.path(), self._changectx)

    def path(self):
        return self._path

    def data(self):
        return self._data

    def flags(self):
        return self._flags

    def isexec(self):
        return 'x' in self._flags

    def islink(self):
        return 'l' in self._flags

    def renamed(self):
        # (source path, nullid) when this is a copy, otherwise None
        return self._copied
@@ -1,2006 +1,2013
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks
12 12 import lock, transaction, store, encoding
13 13 import util, extensions, hook, error
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 import url as urlmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 propertycache = util.propertycache
21 21
22 22 class localrepository(repo.repository):
23 23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
24 24 supportedformats = set(('revlogv1', 'parentdelta'))
25 25 supported = supportedformats | set(('store', 'fncache', 'shared',
26 26 'dotencode'))
27 27
    def __init__(self, baseui, path=None, create=0):
        """Open (or, when create is true, create) the repository at path.

        Reads per-repo configuration, loads extensions, validates the
        repository's requirements against what this version supports,
        resolves a shared store if .hg/sharedpath exists, and primes
        the in-memory caches.
        """
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = util.path_auditor(self.root, self._checknested)
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # a missing .hg/hgrc is fine
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                os.mkdir(self.path)
                # choose on-disk format features for the new repository
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                if self.ui.configbool('format', 'parentdelta', False):
                    requirements.append("parentdelta")
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
            # refuse to open repos needing features we don't have
            for r in requirements - self.supported:
                raise error.RepoError(_("requirement '%s' not supported") % r)

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener("sharedpath").read())
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()

        # These two define the set of tags for this repository. _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'. (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.) They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        self._branchcache = None
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None
114 114
115 115 def _applyrequirements(self, requirements):
116 116 self.requirements = requirements
117 117 self.sopener.options = {}
118 118 if 'parentdelta' in requirements:
119 119 self.sopener.options['parentdelta'] = 1
120 120
121 121 def _writerequirements(self):
122 122 reqfile = self.opener("requires", "w")
123 123 for r in self.requirements:
124 124 reqfile.write("%s\n" % r)
125 125 reqfile.close()
126 126
    def _checknested(self, path):
        """Determine if path is a legal nested repository.

        Returns True only when path lies under self.root and matches
        (or nests within) a subrepo recorded in the working copy's
        .hgsub state; delegates to the subrepo for deeper nesting.
        """
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = os.sep.join(parts)
            if prefix in ctx.substate:
                if prefix == subpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                # try the next shorter ancestor directory
                parts.pop()
        return False
163 163
    @util.propertycache
    def _bookmarks(self):
        # lazily loaded mapping of bookmark name -> node, read from disk
        return bookmarks.read(self)
167 167
    @util.propertycache
    def _bookmarkcurrent(self):
        # lazily loaded name of the currently active bookmark (or None)
        return bookmarks.readcurrent(self)
171 171
    @propertycache
    def changelog(self):
        """The repository changelog, lazily opened.

        When HG_PENDING points at this repo, also read uncommitted
        (pending) changelog data so hooks can see in-progress commits.
        """
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        # tell the store opener which revlog version to use by default
        self.sopener.options['defversion'] = c.version
        return c
181 181
    @propertycache
    def manifest(self):
        # lazily opened manifest revlog
        return manifest.manifest(self.sopener)
185 185
    @propertycache
    def dirstate(self):
        """The working directory state, with parent-node validation.

        The validate callback maps a recorded working-copy parent that
        is unknown to the changelog to nullid, warning only once.
        """
        warned = [0]
        def validate(node):
            try:
                # the assignment exists only to force the lookup,
                # which raises LookupError for unknown nodes
                r = self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)
201 201
    def __getitem__(self, changeid):
        """Return a context: workingctx for None, else a changectx."""
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)
206 206
    def __contains__(self, changeid):
        """True if changeid resolves to a changeset in this repository."""
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False
212 212
    def __nonzero__(self):
        # a repository object is always truthy, even when empty
        return True
215 215
    def __len__(self):
        # number of revisions in the repository
        return len(self.changelog)
218 218
    def __iter__(self):
        # iterate over all revision numbers, in ascending order
        for i in xrange(len(self)):
            yield i
222 222
    def url(self):
        """Return the file: URL of this repository."""
        return 'file:' + self.root
225 225
    def hook(self, name, throw=False, **args):
        """Run the named hook; raise on failure when throw is True."""
        return hook.hook(self.ui, self, name, throw, **args)
228 228
229 229 tag_disallowed = ':\r\n'
230 230
    def _tag(self, names, node, message, local, user, date, extra={}):
        """Low-level tag machinery shared by tag().

        Validates the names, fires pretag hooks, appends the tag lines
        to .hg/localtags (local) or .hgtags (global, followed by a
        commit), and fires tag hooks. Returns the tagging changeset's
        node for global tags, None for local ones.
        """
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            # append tag records; when a tag already exists, first
            # record its old node so history of the tag is preserved
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagtypes and name in self._tagtypes:
                    old = self._tags.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
296 296
    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # refuse to commit a tag on top of uncommitted .hgtags edits
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)
326 326
    def tags(self):
        '''return a mapping of tag to node'''
        # compute lazily and cache; invalidatecaches() resets _tags
        if self._tags is None:
            (self._tags, self._tagtypes) = self._findtags()

        return self._tags
333 333
    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        alltags = {}                    # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        # bookmarks shadow real tags of the same name
        tags.update(self._bookmarks)
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
367 367
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        # ensure _tagtypes is populated
        self.tags()

        return self._tagtypes.get(tagname)
380 380
381 381 def tagslist(self):
382 382 '''return a list of tags ordered by revision'''
383 383 l = []
384 384 for t, n in self.tags().iteritems():
385 385 try:
386 386 r = self.changelog.rev(n)
387 387 except:
388 388 r = -2 # sort to the beginning of the list if unknown
389 389 l.append((r, t, n))
390 390 return [(t, n) for r, t, n in sorted(l)]
391 391
392 392 def nodetags(self, node):
393 393 '''return the tags associated with a node'''
394 394 if not self.nodetagscache:
395 395 self.nodetagscache = {}
396 396 for t, n in self.tags().iteritems():
397 397 self.nodetagscache.setdefault(n, []).append(t)
398 398 for tags in self.nodetagscache.itervalues():
399 399 tags.sort()
400 400 return self.nodetagscache.get(node, [])
401 401
402 def nodebookmarks(self, node):
403 marks = []
404 for bookmark, n in self._bookmarks.iteritems():
405 if n == node:
406 marks.append(bookmark)
407 return sorted(marks)
408
    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        # bring the partial branch-head cache up to the current tip and
        # write it back to disk if anything was missing
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
411 418
    def updatebranchcache(self):
        """Ensure the in-memory branch-head cache is current.

        Reuses the cached map when the tip is unchanged; otherwise
        reads the on-disk cache (or the stale in-memory one) and folds
        in the revisions added since.
        """
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # old tip was stripped or never cached: start from disk
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial
428 435
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache
433 440
434 441 def branchtags(self):
435 442 '''return a dict where branch names map to the tipmost head of
436 443 the branch, open heads come before closed'''
437 444 bt = {}
438 445 for bn, heads in self.branchmap().iteritems():
439 446 tip = heads[-1]
440 447 for h in reversed(heads):
441 448 if 'close' not in self.changelog.read(h)[5]:
442 449 tip = h
443 450 break
444 451 bt[bn] = tip
445 452 return bt
446 453
    def _readbranchcache(self):
        """Read .hg/cache/branchheads from disk.

        Returns (partial, last, lrev): the branch -> heads map plus the
        tip node/rev the cache was valid for; ({}, nullid, nullrev)
        when missing or invalid.
        """
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line records the tip the cache was written against
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            # a corrupt cache is not fatal: fall back to a full rebuild
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
475 482
    def _writebranchcache(self, branches, tip, tiprev):
        """Write the branch-head cache atomically; ignore write errors."""
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            # header line: the tip this cache is valid for
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.rename()
        except (IOError, OSError):
            # the cache is an optimization only; failing to write it is ok
            pass
486 493
    def _updatebranchcache(self, partial, ctxgen):
        """Fold the changesets from ctxgen into the partial branch map,
        pruning entries that are no longer true heads."""
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads
510 517
    def lookup(self, key):
        """Resolve key to a changelog node.

        Resolution order: integer rev, '.', 'null', 'tip', full
        hex/binary node, bookmark, tag, branch, then unique node-prefix
        match. Raises RepoLookupError when nothing matches, or Abort
        when the key is an unknown dirstate parent.
        """
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self._bookmarks:
            return self._bookmarks[key]
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            # show binary nodes as hex in the error message
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)
543 550
544 551 def lookupbranch(self, key, remote=None):
545 552 repo = remote or self
546 553 if key in repo.branchmap():
547 554 return key
548 555
549 556 repo = (remote and remote.local()) and remote or self
550 557 return repo[key].branch()
551 558
    def local(self):
        # this repository lives on local disk (vs. a remote peer proxy)
        return True
554 561
    def join(self, f):
        """Return the path of f relative to the .hg directory."""
        return os.path.join(self.path, f)
557 564
    def wjoin(self, f):
        """Return the path of f relative to the working directory root."""
        return os.path.join(self.root, f)
560 567
    def file(self, f):
        """Return the filelog for tracked file f (leading '/' stripped)."""
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)
565 572
    def changectx(self, changeid):
        # compatibility alias for repo[changeid]
        return self[changeid]
568 575
    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()
572 579
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
577 584
    def getcwd(self):
        """Return the current directory, relative to the repo root."""
        return self.dirstate.getcwd()
580 587
    def pathto(self, f, cwd=None):
        """Return repo-relative path f expressed relative to cwd."""
        return self.dirstate.pathto(f, cwd)
583 590
    def wfile(self, f, mode='r'):
        """Open file f from the working directory."""
        return self.wopener(f, mode)
586 593
    def _link(self, f):
        """True if working-directory file f is a symbolic link."""
        return os.path.islink(self.wjoin(f))
589 596
    def _loadfilter(self, filter):
        """Load and cache the (matcher, fn, params) list for a filter
        config section ('encode' or 'decode')."""
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables a filter inherited from another hgrc
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                # prefer a registered in-process data filter over
                # spawning an external command
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
613 620
    def _filter(self, filterpats, filename, data):
        """Run data through the first filter whose pattern matches
        filename; only one filter ever applies."""
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data
622 629
    @propertycache
    def _encodefilterpats(self):
        # cached [encode] filter patterns, applied when reading files
        return self._loadfilter('encode')
626 633
    @propertycache
    def _decodefilterpats(self):
        # cached [decode] filter patterns, applied when writing files
        return self._loadfilter('decode')
630 637
    def adddatafilter(self, name, filter):
        """Register an in-process data filter under the given name."""
        self._datafilters[name] = filter
633 640
    def wread(self, filename):
        """Read filename from the working directory, applying encode
        filters; symlinks yield their target string."""
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter(self._encodefilterpats, filename, data)
640 647
    def wwrite(self, filename, data, flags):
        """Write data to filename in the working directory, applying
        decode filters and honoring 'l' (symlink) / 'x' (exec) flags."""
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)
649 656
    def wwritedata(self, filename, data):
        """Return data as it would be written to the working directory
        (decode filters applied, nothing written)."""
        return self._filter(self._decodefilterpats, filename, data)
652 659
    def transaction(self, desc):
        """Open (or nest into) a store transaction.

        Saves dirstate, branch and a description so the transaction
        can later be rolled back; returns the transaction object,
        which is also kept as a weak reference for nesting.
        """
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(
            encoding.fromlocal(self.dirstate.branch()))
        self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))

        # on commit, journal.* files become the undo.* files used by rollback
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch")),
                   (self.join("journal.desc"), self.join("undo.desc"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr
683 690
    def recover(self):
        """Roll back an interrupted transaction's journal.

        Returns True when a journal was found and rolled back,
        False otherwise.
        """
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
698 705
    def rollback(self, dryrun=False):
        """Undo the last transaction, restoring dirstate, bookmarks and
        branch from the undo.* files.

        With dryrun, only print what would be rolled back. Returns 1
        when there is no rollback information.
        """
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                try:
                    args = self.opener("undo.desc", "r").read().splitlines()
                    if len(args) >= 3 and self.ui.verbose:
                        desc = _("rolling back to revision %s"
                                 " (undo %s: %s)\n") % (
                                 int(args[0]) - 1, args[1], args[2])
                    elif len(args) >= 2:
                        desc = _("rolling back to revision %s (undo %s)\n") % (
                               int(args[0]) - 1, args[1])
                except IOError:
                    desc = _("rolling back unknown transaction\n")
                self.ui.status(desc)
                if dryrun:
                    return
                transaction.rollback(self.sopener, self.sjoin("undo"),
                                     self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                if os.path.exists(self.join('undo.bookmarks')):
                    util.rename(self.join('undo.bookmarks'),
                                self.join('bookmarks'))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % self.dirstate.branch())
                self.invalidate()
                self.dirstate.invalidate()
                self.destroyed()
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
740 747
    def invalidatecaches(self):
        """Forget derived in-memory caches (tags, node tags, branches)."""
        self._tags = None
        self._tagtypes = None
        self.nodetagscache = None
        self._branchcache = None # in UTF-8
        self._branchcachetip = None
747 754
748 755 def invalidate(self):
749 756 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkscurrent"):
750 757 if a in self.__dict__:
751 758 delattr(self, a)
752 759 self.invalidatecaches()
753 760
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file lockname; shared helper for lock()
        and wlock().

        When the lock is held and wait is true, retry with a timeout;
        otherwise re-raise LockHeld. Runs acquirefn once acquired.
        """
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
768 775
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            # re-enter the already-held lock
            l.lock()
            return l

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
782 789
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # re-enter the already-held lock
            l.lock()
            return l

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
797 804
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """Commit an individual file as part of a larger transaction.

        Records copy/rename metadata, selects the proper filelog parents,
        and appends *fname* to *changelist* when a new filelog revision is
        created (or when only the flags changed during a merge).  Returns
        the filelog node to be stored in the new manifest.
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                # nullid first parent tells readers to use the copy metadata
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
877 884
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            # changes: (modified, added, removed, deleted, unknown,
            #           ignored, clean) -- see status()
            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(_("can't commit subrepos without .hgsub"))
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            # nothing to commit on the current branch -> bail out early
            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                        subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            # deliberate bare except: tell the user where the edited
            # message was saved on ANY failure, then re-raise
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            parents = (p1, p2)
            if p2 == nullid:
                parents = (p1,)
            bookmarks.update(self, parents, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret
1009 1016
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        Commits each changed file, builds the new manifest, appends the
        changelog entry inside a "commit" transaction, fires the
        pretxncommit hook, and returns the new changelog node.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            # linkrev of the revision we are about to append
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    # a missing file (ENOENT) is tolerated as a removal
                    # unless the caller asked for strict errors
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            # let pretxncommit hooks see the pending changelog data
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1077 1084
1078 1085 def destroyed(self):
1079 1086 '''Inform the repository that nodes have been destroyed.
1080 1087 Intended for use by strip and rollback, so there's a common
1081 1088 place for anything that has to be done after destroying history.'''
1082 1089 # XXX it might be nice if we could take the list of destroyed
1083 1090 # nodes, but I don't see an easy way for rollback() to do that
1084 1091
1085 1092 # Ensure the persistent tag cache is updated. Doing it now
1086 1093 # means that the tag cache only has to worry about destroyed
1087 1094 # heads immediately after a strip/rollback. That in turn
1088 1095 # guarantees that "cachetip == currenttip" (comparing both rev
1089 1096 # and node) always means no nodes have been added or destroyed.
1090 1097
1091 1098 # XXX this is suboptimal when qrefresh'ing: we strip the current
1092 1099 # head, refresh the tag cache, then immediately add a new head.
1093 1100 # But I think doing it this way is necessary for the "instant
1094 1101 # tag cache retrieval" case to work.
1095 1102 self.invalidatecaches()
1096 1103
1097 1104 def walk(self, match, node=None):
1098 1105 '''
1099 1106 walk recursively through the directory tree or a given
1100 1107 changeset, finding all files matched by the match
1101 1108 function
1102 1109 '''
1103 1110 return self[node].walk(match)
1104 1111
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted file lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        """

        def mfmatches(ctx):
            # copy of ctx's manifest restricted to files accepted by match
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            # cmp: files the dirstate could not decide about cheaply
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            # anything left in mf1 was removed relative to mf2
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        [l.sort() for l in r]
        return r
1233 1240
1234 1241 def heads(self, start=None):
1235 1242 heads = self.changelog.heads(start)
1236 1243 # sort the output in rev descending order
1237 1244 return sorted(heads, key=self.changelog.rev, reverse=True)
1238 1245
1239 1246 def branchheads(self, branch=None, start=None, closed=False):
1240 1247 '''return a (possibly filtered) list of heads for the given branch
1241 1248
1242 1249 Heads are returned in topological order, from newest to oldest.
1243 1250 If branch is None, use the dirstate branch.
1244 1251 If start is not None, return only heads reachable from start.
1245 1252 If closed is True, return heads that are marked as closed as well.
1246 1253 '''
1247 1254 if branch is None:
1248 1255 branch = self[None].branch()
1249 1256 branches = self.branchmap()
1250 1257 if branch not in branches:
1251 1258 return []
1252 1259 # the cache returns heads ordered lowest to highest
1253 1260 bheads = list(reversed(branches[branch]))
1254 1261 if start is not None:
1255 1262 # filter out the heads that cannot be reached from startrev
1256 1263 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1257 1264 bheads = [h for h in bheads if h in fbheads]
1258 1265 if not closed:
1259 1266 bheads = [h for h in bheads if
1260 1267 ('close' not in self.changelog.read(h)[5])]
1261 1268 return bheads
1262 1269
1263 1270 def branches(self, nodes):
1264 1271 if not nodes:
1265 1272 nodes = [self.changelog.tip()]
1266 1273 b = []
1267 1274 for n in nodes:
1268 1275 t = n
1269 1276 while 1:
1270 1277 p = self.changelog.parents(n)
1271 1278 if p[1] != nullid or p[0] == nullid:
1272 1279 b.append((t, n, p[0], p[1]))
1273 1280 break
1274 1281 n = p[0]
1275 1282 return b
1276 1283
1277 1284 def between(self, pairs):
1278 1285 r = []
1279 1286
1280 1287 for top, bottom in pairs:
1281 1288 n, l, i = top, [], 0
1282 1289 f = 1
1283 1290
1284 1291 while n != bottom and n != nullid:
1285 1292 p = self.changelog.parents(n)[0]
1286 1293 if i == f:
1287 1294 l.append(n)
1288 1295 f = f * 2
1289 1296 n = p
1290 1297 i += 1
1291 1298
1292 1299 r.append(l)
1293 1300
1294 1301 return r
1295 1302
    def pull(self, remote, heads=None, force=False):
        """Pull changes from *remote* (optionally limited to *heads*),
        then fast-forward any local bookmarks the remote has advanced.

        Returns addchangegroup()'s result, or 0 when there was nothing
        to pull.
        """
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                result = 0
            else:
                if heads is None and fetch == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                result = self.addchangegroup(cg, 'pull', remote.url(),
                                             lock=lock)
        finally:
            lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        changed = False
        for k in rb.keys():
            if k in self._bookmarks:
                # nr: remote node for bookmark k, nl: local node
                nr, nl = rb[k], self._bookmarks[k]
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl.rev() >= cr.rev():
                        continue
                    # fast-forward only; anything else is divergence
                    if cr in cl.descendants():
                        self._bookmarks[k] = cr.node()
                        changed = True
                        self.ui.status(_("updating bookmark %s\n") % k)
                    else:
                        self.ui.warn(_("not updating divergent"
                                       " bookmark %s\n") % k)
        if changed:
            bookmarks.write(self)

        return result
1347 1354
1348 1355 def checkpush(self, force, revs):
1349 1356 """Extensions can override this function if additional checks have
1350 1357 to be performed before pushing, or call it if they override push
1351 1358 command.
1352 1359 """
1353 1360 pass
1354 1361
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()

        After the changesets, pushes any local bookmark whose position is
        a descendant of the remote's position (fast-forward only).
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                                 newbranch)
            ret = remote_heads
            if cg is not None:
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push. once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if force:
                        remote_heads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = remote.unbundle(cg, remote_heads, 'push')
                else:
                    # we return an integer indicating remote head count change
                    ret = remote.addchangegroup(cg, 'push', self.url(),
                                                lock=lock)
        finally:
            if lock is not None:
                lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                # nr: remote node (hex), nl: local node (hex) for bookmark k
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    # only advance the remote bookmark when the local
                    # position descends from the remote one
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret
1416 1423
1417 1424 def changegroupinfo(self, nodes, source):
1418 1425 if self.ui.verbose or source == 'bundle':
1419 1426 self.ui.status(_("%d changesets found\n") % len(nodes))
1420 1427 if self.ui.debugflag:
1421 1428 self.ui.debug("list of changesets:\n")
1422 1429 for node in nodes:
1423 1430 self.ui.debug("%s\n" % hex(node))
1424 1431
    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """Compute a changegroup consisting of all the nodes that are
        descendents of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # Compute the list of changesets in this changegroup.
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.
        if not bases:
            bases = [nullid]
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)

        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                return self._changegroup(msng_cl_lst, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)

        self.changegroupinfo(msng_cl_lst, source)

        # We assume that all ancestors of bases are known
        commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if mnfst.deltaparent(r) in mnfst.parentrevs(r):
                    # If the previous rev is one of the parents,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        # And if the file is in the list of files we care
                        # about.
                        if f in changedfiles:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
            return collect_msng_filenodes

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune(revlog, missingnodes):
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in missingnodes:
                clrev = revlog.linkrev(revlog.rev(n))
                if clrev in commonrevs:
                    hasset.add(n)
            for n in hasset:
                missingnodes.pop(n, None)
            for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
                missingnodes.pop(revlog.node(r), None)

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = set()
            collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)

            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity, collect)
            for cnt, chnk in enumerate(group):
                yield chnk
                # revlog.group yields three entries per node, so
                # dividing by 3 gives an approximation of how many
                # nodes have been processed.
                self.ui.progress(_('bundling'), cnt / 3,
                                 unit=_('changesets'))
            changecount = cnt / 3
            self.ui.progress(_('bundling'), None)

            prune(mnfst, msng_mnfst_set)
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst,
                                lambda mnode: msng_mnfst_set[mnode],
                                filenode_collector(changedfiles))
            efiles = {}
            for cnt, chnk in enumerate(group):
                if cnt % 3 == 1:
                    mnode = chnk[:20]
                    efiles.update(mnfst.readdelta(mnode))
                yield chnk
                # see above comment for why we divide by 3
                self.ui.progress(_('bundling'), cnt / 3,
                                 unit=_('manifests'), total=changecount)
            self.ui.progress(_('bundling'), None)
            efiles = len(efiles)

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles.add(fname)
            # Go through all our files in order sorted by name.
            for idx, fname in enumerate(sorted(changedfiles)):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                missingfnodes = msng_filenode_set.pop(fname, {})
                prune(filerevlog, missingfnodes)
                add_extra_nodes(fname, missingfnodes)
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if missingfnodes:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision # (topological order)
                    nodeiter = list(missingfnodes)
                    nodeiter.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(nodeiter,
                                             lambda fnode: missingfnodes[fnode])
                    for chnk in group:
                        # even though we print the same progress on
                        # most loop iterations, put the progress call
                        # here so that time estimates (if any) can be updated
                        self.ui.progress(
                            _('bundling'), idx, item=fname,
                            unit=_('files'), total=efiles)
                        yield chnk
            # Signal that no more groups are left.
            yield changegroup.closechunk()
            self.ui.progress(_('bundling'), None)

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1647 1654
1648 1655 def changegroup(self, basenodes, source):
1649 1656 # to avoid a race we use changegroupsubset() (issue1320)
1650 1657 return self.changegroupsubset(basenodes, self.heads(), source)
1651 1658
    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # changelog revisions being sent; used below to decide which
        # manifest/file revisions belong in the bundle
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            # changelog nodes are their own link nodes
            return x

        def gennodelst(log):
            # yield the nodes of `log` whose linkrev points at one of
            # the changesets being bundled
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def lookuplinkrev_func(revlog):
            # build a node -> linked-changelog-node lookup for `revlog`
            def lookuplinkrev(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuplinkrev

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files
            changedfiles = set()
            mmfs = {}
            collect = changegroup.collector(cl, mmfs, changedfiles)

            for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
                # revlog.group yields three entries per node, so
                # dividing by 3 gives an approximation of how many
                # nodes have been processed.
                self.ui.progress(_('bundling'), cnt / 3, unit=_('changesets'))
                yield chnk
            changecount = cnt / 3
            self.ui.progress(_('bundling'), None)

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            # efiles collects manifest delta entries; only its length
            # (number of distinct files touched) is used, as the
            # progress total for the file phase below
            efiles = {}
            for cnt, chnk in enumerate(mnfst.group(nodeiter,
                                                   lookuplinkrev_func(mnfst))):
                if cnt % 3 == 1:
                    # the middle chunk of each node's triple begins with
                    # the 20-byte manifest node
                    mnode = chnk[:20]
                    efiles.update(mnfst.readdelta(mnode))
                # see above comment for why we divide by 3
                self.ui.progress(_('bundling'), cnt / 3,
                                 unit=_('manifests'), total=changecount)
                yield chnk
            efiles = len(efiles)
            self.ui.progress(_('bundling'), None)

            for idx, fname in enumerate(sorted(changedfiles)):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    # a file group is a name header followed by the
                    # file's delta chunks
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuplinkrev_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        self.ui.progress(
                            _('bundling'), idx, item=fname,
                            total=efiles, unit=_('files'))
                        yield chnk
            self.ui.progress(_('bundling'), None)

            # signal that no more groups are left
            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1735 1742
    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.
        If lock is not None, the function takes ownership of the lock
        and releases it after the changegroup is added.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            # map an incoming changeset node to the rev it will get,
            # logging it as a side effect
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            # node -> existing changelog revision (link revision for the
            # manifest and file groups)
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        # set of files touched by the incoming changesets; later
        # collapsed to a count used as the file-phase progress total
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                # per-chunk progress callback shared by all phases;
                # step/count/total are rebound as each phase starts
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            if (cl.addgroup(source, csmap, trp) is None
                and not emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = 'files'
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while 1:
                f = source.chunk()
                if not f:
                    # an empty chunk marks the end of the file groups
                    break
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    # tick off filenodes the manifests said to expect
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            # anything still listed in needfiles was promised by a
            # manifest but not delivered - abort unless the node is
            # already present in the local revlog
            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                             % (changesets, revisions, files, heads))

            if changesets > 0:
                # let pretxnchangegroup hooks see the pending changelog
                # before the transaction commits
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # FIXME - why does this care about tip?
        if newheads == oldheads:
            bookmarks.update(self, self.dirstate.parents(), self['tip'].node())

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1903 1910
1904 1911
    def stream_in(self, remote, requirements):
        """Clone by copying raw store files streamed from *remote*.

        Protocol: a numeric status line (0 = ok, 1 = forbidden,
        2 = remote lock failed), then a "<filecount> <bytecount>"
        header, then per file a "name NUL size" line followed by
        *size* bytes of raw revlog data written straight into our
        store. Returns len(self.heads()) + 1.
        """
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        # guard against a zero/negative interval in the rate report
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))

        # new requirements = old non-format requirements + new format-related
        # requirements from the streamed-in repository
        requirements.update(set(self.requirements) - self.supportedformats)
        self._applyrequirements(requirements)
        self._writerequirements()

        # drop all cached state: the store was replaced wholesale
        self.invalidate()
        return len(self.heads()) + 1
1959 1966
1960 1967 def clone(self, remote, heads=[], stream=False):
1961 1968 '''clone remote repository.
1962 1969
1963 1970 keyword arguments:
1964 1971 heads: list of revs to clone (forces use of pull)
1965 1972 stream: use streaming clone if possible'''
1966 1973
1967 1974 # now, all clients that can request uncompressed clones can
1968 1975 # read repo formats supported by all servers that can serve
1969 1976 # them.
1970 1977
1971 1978 # if revlog format changes, client will have to check version
1972 1979 # and format flags on "stream" capability, and use
1973 1980 # uncompressed only if compatible.
1974 1981
1975 1982 if stream and not heads:
1976 1983 # 'stream' means remote revlog format is revlogv1 only
1977 1984 if remote.capable('stream'):
1978 1985 return self.stream_in(remote, set(('revlogv1',)))
1979 1986 # otherwise, 'streamreqs' contains the remote revlog format
1980 1987 streamreqs = remote.capable('streamreqs')
1981 1988 if streamreqs:
1982 1989 streamreqs = set(streamreqs.split(','))
1983 1990 # if we support it, stream in and adjust our requirements
1984 1991 if not streamreqs - self.supportedformats:
1985 1992 return self.stream_in(remote, streamreqs)
1986 1993 return self.pull(remote, heads)
1987 1994
1988 1995 def pushkey(self, namespace, key, old, new):
1989 1996 return pushkey.push(self, namespace, key, old, new)
1990 1997
1991 1998 def listkeys(self, namespace):
1992 1999 return pushkey.list(self, namespace)
1993 2000
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the queued (src, dest) renames.

    The rename list is snapshotted up front so later mutation of
    *files* cannot change what the callback does.
    """
    pending = [tuple(pair) for pair in files]
    def runrenames():
        for src, dest in pending:
            util.rename(src, dest)
    return runrenames
2001 2008
def instance(ui, path, create):
    """Repository factory: open (or create) the local repository at *path*,
    stripping any leading 'file' scheme from the location first."""
    localpath = util.drop_scheme('file', path)
    return localrepository(ui, localpath, create)
2004 2011
def islocal(path):
    """A localrepository is always local, whatever *path* contains."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now