##// END OF EJS Templates
manifest: rename ambiguously-named set to setflag...
Augie Fackler -
r22942:03602f76 default
parent child Browse files
Show More
@@ -1,1717 +1,1717 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, nullrev, short, hex, bin
9 9 from i18n import _
10 10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 11 import match as matchmod
12 12 import os, errno, stat
13 13 import obsolete as obsmod
14 14 import repoview
15 15 import fileset
16 16 import revlog
17 17
18 18 propertycache = util.propertycache
19 19
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        # If the caller already passed a context as changeid, hand it
        # back unchanged instead of wrapping it a second time.
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        # Defaults for the null revision; subclasses overwrite these
        # in their __init__.
        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __str__(self):
        return short(self.node())

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts of different concrete types never compare equal,
        # even when they point at the same revision.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # "path in ctx" is defined by manifest membership.
        return key in self._manifest

    def __getitem__(self, key):
        # ctx[path] yields a file context for that path.
        return self.filectx(key)

    def __iter__(self):
        # Iterate over tracked file names in sorted order.
        for f in sorted(self._manifest):
            yield f
66 66
67 67 def _manifestmatches(self, match, s):
68 68 """generate a new manifest filtered by the match argument
69 69
70 70 This method is for internal use only and mainly exists to provide an
71 71 object oriented way for other contexts to customize the manifest
72 72 generation.
73 73 """
74 74 if match.always():
75 75 return self.manifest().copy()
76 76
77 77 files = match.files()
78 78 if (match.matchfn == match.exact or
79 79 (not match.anypats() and util.all(fn in self for fn in files))):
80 80 return self.manifest().intersectfiles(files)
81 81
82 82 mf = self.manifest().copy()
83 83 for fn in mf.keys():
84 84 if not match(fn):
85 85 del mf[fn]
86 86 return mf
87 87
88 88 def _matchstatus(self, other, s, match, listignored, listclean,
89 89 listunknown):
90 90 """return match.always if match is none
91 91
92 92 This internal method provides a way for child objects to override the
93 93 match operator.
94 94 """
95 95 return match or matchmod.always(self._repo.root, self._repo.getcwd())
96 96
    def _prestatus(self, other, s, match, listignored, listclean, listunknown):
        """provide a hook to allow child objects to preprocess status results

        For example, this allows other contexts, such as workingctx, to query
        the dirstate before comparing the manifests.
        """
        # load earliest manifest first for caching reasons
        if self.rev() < other.rev():
            self.manifest()
        return s

    def _poststatus(self, other, s, match, listignored, listclean, listunknown):
        """provide a hook to allow child objects to postprocess status results

        For example, this allows other contexts, such as workingctx, to filter
        suspect symlinks in the case of FAT32 and NTFS filesystems.
        """
        # Base implementation: pass the status through unmodified.
        return s
115 115
    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # mf1 is the "old" side (other), mf2 the "new" side (self),
        # both filtered down to the matched files.
        mf1 = other._manifestmatches(match, s)
        mf2 = self._manifestmatches(match, s)

        modified, added, clean = [], [], []
        deleted, unknown, ignored = s[3], s[4], s[5]
        # Files carrying flags on either side; only these need a flag
        # comparison in the loop below.
        withflags = mf1.withflags() | mf2.withflags()
        for fn, mf2node in mf2.iteritems():
            if fn in mf1:
                # Present on both sides: modified when flags differ or
                # content differs (a falsy mf2node forces an actual
                # content comparison via cmp()).
                if (fn not in deleted and
                    ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                     (mf1[fn] != mf2node and
                      (mf2node or self[fn].cmp(other[fn]))))):
                    modified.append(fn)
                elif listclean:
                    clean.append(fn)
                # Drop handled entries so mf1 ends up containing only
                # files missing from the new side (i.e. removed).
                del mf1[fn]
            elif fn not in deleted:
                added.append(fn)
        removed = mf1.keys()
        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1]
            ignored = [fn for fn in ignored if fn not in mf1]

        return [modified, added, removed, deleted, unknown, ignored, clean]
144 144
    @propertycache
    def substate(self):
        # Subrepository state for this context, parsed once and cached.
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        # Recorded revision of the subrepository at subpath.
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def phasestr(self):
        # Human-readable name of this context's phase.
        return phases.phasenames[self.phase()]
    def mutable(self):
        # Any phase above public may still be rewritten.
        return self.phase() > phases.public

    def getfileset(self, expr):
        # Evaluate a fileset expression against this context.
        return fileset.getfileset(self, expr)
167 167
    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        """True if the changeset is not obsolete but its ancestors are"""
        return self.rev() in obsmod.getrevs(self._repo, 'unstable')

    def bumped(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'bumped')

    def divergent(self):
        """Is a successor of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'divergent')

    def troubled(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.unstable() or self.bumped() or self.divergent()
197 197
198 198 def troubles(self):
199 199 """return the list of troubles affecting this changesets.
200 200
201 201 Troubles are returned as strings. possible values are:
202 202 - unstable,
203 203 - bumped,
204 204 - divergent.
205 205 """
206 206 troubles = []
207 207 if self.unstable():
208 208 troubles.append('unstable')
209 209 if self.bumped():
210 210 troubles.append('bumped')
211 211 if self.divergent():
212 212 troubles.append('divergent')
213 213 return troubles
214 214
    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        if len(self._parents) == 2:
            return self._parents[1]
        # No second parent: return the null changeset context.
        return changectx(self._repo, -1)

    def _fileinfo(self, path):
        """Return (filenode, flags) for path in this changeset.

        Raises ManifestLookupError when the path is not tracked here.
        """
        # Use the already-loaded full manifest when available.
        if '_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        # Otherwise try the cheaper manifest delta before falling back
        # to a targeted search of the full manifest.
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        node, flag = self._repo.manifest.find(self._changeset[0], path)
        if not node:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        # A path missing from the manifest simply has no flags.
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''
253 253
254 254 def sub(self, path):
255 255 return subrepo.subrepo(self, path)
256 256
257 257 def match(self, pats=[], include=None, exclude=None, default='glob'):
258 258 r = self._repo
259 259 return matchmod.match(r.root, r.getcwd(), pats,
260 260 include, exclude, default,
261 261 auditor=r.auditor, ctx=self)
262 262
    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        # Default to diffing against the first parent.
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    @propertycache
    def _dirs(self):
        # Directory structure implied by the manifest, computed lazily.
        return scmutil.dirs(self._manifest)

    def dirs(self):
        return self._dirs

    def dirty(self, missing=False, merge=True, branch=True):
        # Read-only contexts are never dirty.
        return False
281 281
282 282 def status(self, other=None, match=None, listignored=False,
283 283 listclean=False, listunknown=False, listsubrepos=False):
284 284 """return status of files between two nodes or node and working
285 285 directory.
286 286
287 287 If other is None, compare this node with working directory.
288 288
289 289 returns (modified, added, removed, deleted, unknown, ignored, clean)
290 290 """
291 291
292 292 ctx1 = self
293 293 ctx2 = self._repo[other]
294 294
295 295 # This next code block is, admittedly, fragile logic that tests for
296 296 # reversing the contexts and wouldn't need to exist if it weren't for
297 297 # the fast (and common) code path of comparing the working directory
298 298 # with its first parent.
299 299 #
300 300 # What we're aiming for here is the ability to call:
301 301 #
302 302 # workingctx.status(parentctx)
303 303 #
304 304 # If we always built the manifest for each context and compared those,
305 305 # then we'd be done. But the special case of the above call means we
306 306 # just copy the manifest of the parent.
307 307 reversed = False
308 308 if (not isinstance(ctx1, changectx)
309 309 and isinstance(ctx2, changectx)):
310 310 reversed = True
311 311 ctx1, ctx2 = ctx2, ctx1
312 312
313 313 r = [[], [], [], [], [], [], []]
314 314 match = ctx2._matchstatus(ctx1, r, match, listignored, listclean,
315 315 listunknown)
316 316 r = ctx2._prestatus(ctx1, r, match, listignored, listclean, listunknown)
317 317 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
318 318 listunknown)
319 319 r = ctx2._poststatus(ctx1, r, match, listignored, listclean,
320 320 listunknown)
321 321
322 322 if reversed:
323 323 # reverse added and removed
324 324 r[1], r[2] = r[2], r[1]
325 325
326 326 if listsubrepos:
327 327 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
328 328 rev2 = ctx2.subrev(subpath)
329 329 try:
330 330 submatch = matchmod.narrowmatcher(subpath, match)
331 331 s = sub.status(rev2, match=submatch, ignored=listignored,
332 332 clean=listclean, unknown=listunknown,
333 333 listsubrepos=True)
334 334 for rfiles, sfiles in zip(r, s):
335 335 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
336 336 except error.LookupError:
337 337 self._repo.ui.status(_("skipping missing "
338 338 "subrepository: %s\n") % subpath)
339 339
340 340 for l in r:
341 341 l.sort()
342 342
343 343 # we return a tuple to signify that this list isn't changing
344 344 return scmutil.status(*r)
345 345
346 346
def makememctx(repo, parents, text, user, date, branch, files, store,
               editor=None):
    """Build an in-memory changeset context whose file data comes from
    *store*."""
    def getfilectx(repo, memctx, path):
        # Pull the file from the store; None data means the file was
        # removed in this change.
        data, mode, copied = store.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink, isexec=isexec,
                          copied=copied, memctx=memctx)

    extra = {}
    if branch:
        extra['branch'] = encoding.fromlocal(branch)
    return memctx(repo, parents, text, files, getfilectx, user,
                  date, extra, editor)
362 362
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        # An empty changeid means the working directory's first parent.
        if changeid == '':
            changeid = '.'
        self._repo = repo

        # Integer: a plain revision number.
        if isinstance(changeid, int):
            try:
                self._node = repo.changelog.node(changeid)
            except IndexError:
                raise error.RepoLookupError(
                    _("unknown revision '%s'") % changeid)
            self._rev = changeid
            return
        # Python 2 long: normalize to its string form and fall through
        # to the integer-string parsing below.
        if isinstance(changeid, long):
            changeid = str(changeid)
        if changeid == '.':
            self._node = repo.dirstate.p1()
            self._rev = repo.changelog.rev(self._node)
            return
        if changeid == 'null':
            self._node = nullid
            self._rev = nullrev
            return
        if changeid == 'tip':
            self._node = repo.changelog.tip()
            self._rev = repo.changelog.rev(self._node)
            return
        # A 20-byte string is tried as a binary node id.
        if len(changeid) == 20:
            try:
                self._node = changeid
                self._rev = repo.changelog.rev(changeid)
                return
            except LookupError:
                pass

        # Decimal revision number given as a string; negative values
        # count from the end of the changelog.
        try:
            r = int(changeid)
            if str(r) != changeid:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l:
                raise ValueError
            self._rev = r
            self._node = repo.changelog.node(r)
            return
        except (ValueError, OverflowError, IndexError):
            pass

        # 40 hex digits: a full hexadecimal node id.
        if len(changeid) == 40:
            try:
                self._node = bin(changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except (TypeError, LookupError):
                pass

        # Symbolic names, in order of precedence: bookmark, tag,
        # branch, then unambiguous node-id prefix.
        if changeid in repo._bookmarks:
            self._node = repo._bookmarks[changeid]
            self._rev = repo.changelog.rev(self._node)
            return
        if changeid in repo._tagscache.tags:
            self._node = repo._tagscache.tags[changeid]
            self._rev = repo.changelog.rev(self._node)
            return
        try:
            self._node = repo.branchtip(changeid)
            self._rev = repo.changelog.rev(self._node)
            return
        except error.RepoLookupError:
            pass

        self._node = repo.changelog._partialmatch(changeid)
        if self._node is not None:
            self._rev = repo.changelog.rev(self._node)
            return

        # lookup failed
        # check if it might have come from damaged dirstate
        #
        # XXX we could avoid the unfiltered if we had a recognizable exception
        # for filtered changeset access
        if changeid in repo.unfiltered().dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(changeid))
        try:
            # A binary node id is unprintable; report it as hex.
            if len(changeid) == 20:
                changeid = hex(changeid)
        except TypeError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)
467 467
    def __hash__(self):
        # Hash on the revision number; fall back to object identity for
        # instances that never finished initializing.
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        # Only the null revision is "false".
        return self._rev != nullrev

    @propertycache
    def _changeset(self):
        # Raw changelog entry for this revision, read lazily and cached.
        return self._repo.changelog.read(self.rev())

    @propertycache
    def _manifest(self):
        return self._repo.manifest.read(self._changeset[0])

    @propertycache
    def _manifestdelta(self):
        # Manifest delta against the parent; cheaper than a full read.
        return self._repo.manifest.readdelta(self._changeset[0])

    @propertycache
    def _parents(self):
        p = self._repo.changelog.parentrevs(self._rev)
        # Drop a null second parent so single-parent changesets yield a
        # one-element list.
        if p[1] == nullrev:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def changeset(self):
        return self._changeset
    def manifestnode(self):
        return self._changeset[0]

    def user(self):
        return self._changeset[1]
    def date(self):
        return self._changeset[2]
    def files(self):
        return self._changeset[3]
    def description(self):
        return self._changeset[4]
    def branch(self):
        # Branch name is stored in the extra dict; convert from the
        # internal (UTF-8) encoding to the local one.
        return encoding.tolocal(self._changeset[5].get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset[5]
    def extra(self):
        return self._changeset[5]
    def tags(self):
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def children(self):
        """return contexts for each child changeset"""
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        # Lazily yield a context for every ancestor revision.
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # Multiple common-ancestor heads: honor the user's
            # merge.preferancestor configuration when it names one of
            # them; otherwise fall back to the revlog ancestor.
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)

    def descendant(self, other):
        """True if other is descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)
582 582
    def walk(self, match):
        # Yield names of tracked files matching 'match'; unmatched
        # explicit patterns are reported through match.bad().
        fset = set(match.files())
        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fset.discard('.')

        # avoid the entire walk if we're only looking for specific files
        if fset and not match.anypats():
            if util.all([fn in self for fn in fset]):
                for fn in sorted(fset):
                    if match(fn):
                        yield fn
                # NOTE(review): raising StopIteration inside a generator
                # is a Python 2 idiom; under PEP 479 (Python 3.7+) this
                # would surface as a RuntimeError.
                raise StopIteration

        for fn in self:
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            if match(fn):
                yield fn
        # Anything left in fset matched no tracked file; directory
        # names are tolerated, everything else is a bad pattern.
        for fn in sorted(fset):
            if fn in self._dirs:
                # specified pattern is a directory
                continue
            match.bad(fn, _('no such file in rev %s') % self)
608 608
    def matches(self, match):
        # Convenience alias: iterate files of this context that match.
        return self.walk(match)
611 611
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
    in the repo,
    workingfilectx: a filecontext that represents files from the working
    directory,
    memfilectx: a filecontext that represents files in-memory."""
    def __new__(cls, repo, path, *args, **kwargs):
        return super(basefilectx, cls).__new__(cls)

    @propertycache
    def _filelog(self):
        # Filelog (per-file revlog) for this path.
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        # Prefer an explicitly supplied changeid, then an attached
        # changectx; only fall back to the filelog's linkrev last.
        if '_changeid' in self.__dict__:
            return self._changeid
        elif '_changectx' in self.__dict__:
            return self._changectx.rev()
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if '_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        # A file context is true when its file node can be resolved.
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    def __str__(self):
        return "%s@%s" % (self.path(), self._changectx)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            # Fall back to identity for partially initialized objects.
            return id(self)

    def __eq__(self, other):
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)
679 679
    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    def flags(self):
        # File flags are looked up via the owning changectx.
        return self._changectx.flags(self._path)
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        # Changelog revision the filelog links this file revision to;
        # may differ from rev() when a file node is reused.
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx

    def path(self):
        return self._path

    def isbinary(self):
        # Treat unreadable data as non-binary instead of propagating
        # the IOError.
        try:
            return util.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()
729 729
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # Only run the expensive content comparison when the sizes
        # could plausibly match; otherwise the files must differ.
        if (fctx._filerev is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        # Sizes differ, so the contents differ.
        return True
744 744
    def parents(self):
        _path = self._path
        fl = self._filelog
        pl = [(_path, n, fl) for n in self._filelog.parents(self._filenode)]

        # If this file revision records a rename, the first parent is
        # the rename source; its filelog is unknown here (None).
        r = self._filelog.renamed(self._filenode)
        if r:
            pl[0] = (r[0], r[1], None)

        # Build contexts, dropping null parents.
        return [filectx(self._repo, p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        # No second parent: synthesize a null file context.
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
765 765
    def annotate(self, follow=False, linenumber=None, diffopts=None):
        '''returns a list of tuples of (ctx, line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed.
        This returns tuples of ((ctx, linenumber), line) for each line,
        if "linenumber" parameter is NOT "None".
        In such tuples, linenumber means one at the first appearance
        in the managed file.
        To reduce annotation cost,
        this returns fixed value(False is used) as linenumber,
        if "linenumber" parameter is "False".'''

        # 'decorate' pairs each line of text with its annotation: the
        # revision alone, (revision, line number), or (revision, False)
        # depending on the linenumber parameter.
        if linenumber is None:
            def decorate(text, rev):
                return ([rev] * len(text.splitlines()), text)
        elif linenumber:
            def decorate(text, rev):
                size = len(text.splitlines())
                return ([(rev, i) for i in xrange(1, size + 1)], text)
        else:
            def decorate(text, rev):
                return ([(rev, False)] * len(text.splitlines()), text)

        def pair(parent, child):
            # Copy annotations for unchanged ('=') blocks from parent
            # to child; everything else stays attributed to the child.
            blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
                                     refine=True)
            for (a1, a2, b1, b2), t in blocks:
                # Changed blocks ('!') or blocks made only of blank lines ('~')
                # belong to the child.
                if t == '=':
                    child[0][b1:b2] = parent[0][a1:a2]
            return child

        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        if self.rev() != self.linkrev():
            base = self.filectx(self.filenode())
        else:
            base = self

        # This algorithm would prefer to be recursive, but Python is a
        # bit recursion-hostile. Instead we do an iterative
        # depth-first search.

        visit = [base]
        hist = {}
        pcache = {}
        # 'needed' counts how many children still require each
        # context's annotation so finished entries can be freed early.
        needed = {base: 1}
        while visit:
            f = visit[-1]
            pcached = f in pcache
            if not pcached:
                pcache[f] = parents(f)

            ready = True
            pl = pcache[f]
            for p in pl:
                if p not in hist:
                    ready = False
                    visit.append(p)
                if not pcached:
                    needed[p] = needed.get(p, 0) + 1
            if ready:
                visit.pop()
                reusable = f in hist
                if reusable:
                    curr = hist[f]
                else:
                    curr = decorate(f.data(), f)
                for p in pl:
                    if not reusable:
                        curr = pair(hist[p], curr)
                    if needed[p] == 1:
                        del hist[p]
                        del needed[p]
                    else:
                        needed[p] -= 1

                hist[f] = curr
                pcache[f] = []

        return zip(hist[base][0], hist[base][1].splitlines(True))
864 864
865 865 def ancestors(self, followfirst=False):
866 866 visit = {}
867 867 c = self
868 868 cut = followfirst and 1 or None
869 869 while True:
870 870 for parent in c.parents()[:cut]:
871 871 visit[(parent.rev(), parent.node())] = parent
872 872 if not visit:
873 873 break
874 874 c = visit.pop(max(visit))
875 875 yield c
876 876
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # At least one way of locating the file revision is required.
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        # Pre-seed the propertycaches with whatever the caller already
        # knows, so it isn't recomputed lazily.
        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.RepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog)

    def data(self):
        # Raw file contents. Censored nodes abort unless censor.policy
        # is set to "ignore", in which case they read as empty.
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy", "abort") == "ignore":
                return ""
            raise util.Abort(_("censored node: %s") % short(self._filenode),
                             hint="set censor.policy to ignore errors")

    def size(self):
        return self._filelog.size(self._filerev)

    def renamed(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        # The file node was reused from an earlier changeset: only
        # report the copy if neither parent carries this exact node.
        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
974 974
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        # user/date/status fall back to propertycache defaults when unset
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = extra.copy() if extra else {}
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'
1002 1002
1003 1003 def __str__(self):
1004 1004 return str(self._parents[0]) + "+"
1005 1005
1006 1006 def __nonzero__(self):
1007 1007 return True
1008 1008
1009 1009 def _buildflagfunc(self):
1010 1010 # Create a fallback function for getting file flags when the
1011 1011 # filesystem doesn't support them
1012 1012
1013 1013 copiesget = self._repo.dirstate.copies().get
1014 1014
1015 1015 if len(self._parents) < 2:
1016 1016 # when we have one parent, it's easy: copy from parent
1017 1017 man = self._parents[0].manifest()
1018 1018 def func(f):
1019 1019 f = copiesget(f, f)
1020 1020 return man.flags(f)
1021 1021 else:
1022 1022 # merges are tricky: we try to reconstruct the unstored
1023 1023 # result from the merge (issue1802)
1024 1024 p1, p2 = self._parents
1025 1025 pa = p1.ancestor(p2)
1026 1026 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1027 1027
1028 1028 def func(f):
1029 1029 f = copiesget(f, f) # may be wrong for merges with copies
1030 1030 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1031 1031 if fl1 == fl2:
1032 1032 return fl1
1033 1033 if fl1 == fla:
1034 1034 return fl2
1035 1035 if fl2 == fla:
1036 1036 return fl1
1037 1037 return '' # punt for conflicts
1038 1038
1039 1039 return func
1040 1040
1041 1041 @propertycache
1042 1042 def _flagfunc(self):
1043 1043 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1044 1044
1045 1045 @propertycache
1046 1046 def _manifest(self):
1047 1047 """generate a manifest corresponding to the values in self._status"""
1048 1048
1049 1049 man = self._parents[0].manifest().copy()
1050 1050 if len(self._parents) > 1:
1051 1051 man2 = self.p2().manifest()
1052 1052 def getman(f):
1053 1053 if f in man:
1054 1054 return man
1055 1055 return man2
1056 1056 else:
1057 1057 getman = lambda f: man
1058 1058
1059 1059 copied = self._repo.dirstate.copies()
1060 1060 ff = self._flagfunc
1061 1061 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1062 1062 for f in l:
1063 1063 orig = copied.get(f, f)
1064 1064 man[f] = getman(orig).get(orig, nullid) + i
1065 1065 try:
1066 man.set(f, ff(f))
1066 man.setflag(f, ff(f))
1067 1067 except OSError:
1068 1068 pass
1069 1069
1070 1070 for f in self._status.deleted + self._status.removed:
1071 1071 if f in man:
1072 1072 del man[f]
1073 1073
1074 1074 return man
1075 1075
1076 1076 @propertycache
1077 1077 def _status(self):
1078 1078 return self._repo.status()
1079 1079
1080 1080 @propertycache
1081 1081 def _user(self):
1082 1082 return self._repo.ui.username()
1083 1083
1084 1084 @propertycache
1085 1085 def _date(self):
1086 1086 return util.makedate()
1087 1087
1088 1088 def subrev(self, subpath):
1089 1089 return None
1090 1090
1091 1091 def user(self):
1092 1092 return self._user or self._repo.ui.username()
1093 1093 def date(self):
1094 1094 return self._date
1095 1095 def description(self):
1096 1096 return self._text
1097 1097 def files(self):
1098 1098 return sorted(self._status.modified + self._status.added +
1099 1099 self._status.removed)
1100 1100
1101 1101 def modified(self):
1102 1102 return self._status.modified
1103 1103 def added(self):
1104 1104 return self._status.added
1105 1105 def removed(self):
1106 1106 return self._status.removed
1107 1107 def deleted(self):
1108 1108 return self._status.deleted
1109 1109 def unknown(self):
1110 1110 return self._status.unknown
1111 1111 def ignored(self):
1112 1112 return self._status.ignored
1113 1113 def clean(self):
1114 1114 return self._status.clean
1115 1115 def branch(self):
1116 1116 return encoding.tolocal(self._extra['branch'])
1117 1117 def closesbranch(self):
1118 1118 return 'close' in self._extra
1119 1119 def extra(self):
1120 1120 return self._extra
1121 1121
1122 1122 def tags(self):
1123 1123 t = []
1124 1124 for p in self.parents():
1125 1125 t.extend(p.tags())
1126 1126 return t
1127 1127
1128 1128 def bookmarks(self):
1129 1129 b = []
1130 1130 for p in self.parents():
1131 1131 b.extend(p.bookmarks())
1132 1132 return b
1133 1133
1134 1134 def phase(self):
1135 1135 phase = phases.draft # default phase to draft
1136 1136 for p in self.parents():
1137 1137 phase = max(phase, p.phase())
1138 1138 return phase
1139 1139
1140 1140 def hidden(self):
1141 1141 return False
1142 1142
1143 1143 def children(self):
1144 1144 return []
1145 1145
1146 1146 def flags(self, path):
1147 1147 if '_manifest' in self.__dict__:
1148 1148 try:
1149 1149 return self._manifest.flags(path)
1150 1150 except KeyError:
1151 1151 return ''
1152 1152
1153 1153 try:
1154 1154 return self._flagfunc(path)
1155 1155 except OSError:
1156 1156 return ''
1157 1157
1158 1158 def ancestor(self, c2):
1159 1159 """return the "best" ancestor context of self and c2"""
1160 1160 return self._parents[0].ancestor(c2) # punt on two parents for now
1161 1161
1162 1162 def walk(self, match):
1163 1163 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1164 1164 True, False))
1165 1165
1166 1166 def matches(self, match):
1167 1167 return sorted(self._repo.dirstate.matches(match))
1168 1168
1169 1169 def ancestors(self):
1170 1170 for a in self._repo.changelog.ancestors(
1171 1171 [p.rev() for p in self._parents]):
1172 1172 yield changectx(self._repo, a)
1173 1173
1174 1174 def markcommitted(self, node):
1175 1175 """Perform post-commit cleanup necessary after committing this ctx
1176 1176
1177 1177 Specifically, this updates backing stores this working context
1178 1178 wraps to reflect the fact that the changes reflected by this
1179 1179 workingctx have been committed. For example, it marks
1180 1180 modified and added files as normal in the dirstate.
1181 1181
1182 1182 """
1183 1183
1184 1184 self._repo.dirstate.beginparentchange()
1185 1185 for f in self.modified() + self.added():
1186 1186 self._repo.dirstate.normal(f)
1187 1187 for f in self.removed():
1188 1188 self._repo.dirstate.drop(f)
1189 1189 self._repo.dirstate.setparents(node)
1190 1190 self._repo.dirstate.endparentchange()
1191 1191
1192 1192 def dirs(self):
1193 1193 return self._repo.dirstate.dirs()
1194 1194
class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1207 1207
1208 1208 def __iter__(self):
1209 1209 d = self._repo.dirstate
1210 1210 for f in d:
1211 1211 if d[f] != 'r':
1212 1212 yield f
1213 1213
1214 1214 def __contains__(self, key):
1215 1215 return self._repo.dirstate[key] not in "?r"
1216 1216
1217 1217 @propertycache
1218 1218 def _parents(self):
1219 1219 p = self._repo.dirstate.parents()
1220 1220 if p[1] == nullid:
1221 1221 p = p[:-1]
1222 1222 return [changectx(self._repo, x) for x in p]
1223 1223
1224 1224 def filectx(self, path, filelog=None):
1225 1225 """get a file context from the working directory"""
1226 1226 return workingfilectx(self._repo, path, workingctx=self,
1227 1227 filelog=filelog)
1228 1228
1229 1229 def dirty(self, missing=False, merge=True, branch=True):
1230 1230 "check whether a working directory is modified"
1231 1231 # check subrepos first
1232 1232 for s in sorted(self.substate):
1233 1233 if self.sub(s).dirty():
1234 1234 return True
1235 1235 # check current working dir
1236 1236 return ((merge and self.p2()) or
1237 1237 (branch and self.branch() != self.p1().branch()) or
1238 1238 self.modified() or self.added() or self.removed() or
1239 1239 (missing and self.deleted()))
1240 1240
1241 1241 def add(self, list, prefix=""):
1242 1242 join = lambda f: os.path.join(prefix, f)
1243 1243 wlock = self._repo.wlock()
1244 1244 ui, ds = self._repo.ui, self._repo.dirstate
1245 1245 try:
1246 1246 rejected = []
1247 1247 lstat = self._repo.wvfs.lstat
1248 1248 for f in list:
1249 1249 scmutil.checkportable(ui, join(f))
1250 1250 try:
1251 1251 st = lstat(f)
1252 1252 except OSError:
1253 1253 ui.warn(_("%s does not exist!\n") % join(f))
1254 1254 rejected.append(f)
1255 1255 continue
1256 1256 if st.st_size > 10000000:
1257 1257 ui.warn(_("%s: up to %d MB of RAM may be required "
1258 1258 "to manage this file\n"
1259 1259 "(use 'hg revert %s' to cancel the "
1260 1260 "pending addition)\n")
1261 1261 % (f, 3 * st.st_size // 1000000, join(f)))
1262 1262 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1263 1263 ui.warn(_("%s not added: only files and symlinks "
1264 1264 "supported currently\n") % join(f))
1265 1265 rejected.append(f)
1266 1266 elif ds[f] in 'amn':
1267 1267 ui.warn(_("%s already tracked!\n") % join(f))
1268 1268 elif ds[f] == 'r':
1269 1269 ds.normallookup(f)
1270 1270 else:
1271 1271 ds.add(f)
1272 1272 return rejected
1273 1273 finally:
1274 1274 wlock.release()
1275 1275
1276 1276 def forget(self, files, prefix=""):
1277 1277 join = lambda f: os.path.join(prefix, f)
1278 1278 wlock = self._repo.wlock()
1279 1279 try:
1280 1280 rejected = []
1281 1281 for f in files:
1282 1282 if f not in self._repo.dirstate:
1283 1283 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1284 1284 rejected.append(f)
1285 1285 elif self._repo.dirstate[f] != 'a':
1286 1286 self._repo.dirstate.remove(f)
1287 1287 else:
1288 1288 self._repo.dirstate.drop(f)
1289 1289 return rejected
1290 1290 finally:
1291 1291 wlock.release()
1292 1292
1293 1293 def undelete(self, list):
1294 1294 pctxs = self.parents()
1295 1295 wlock = self._repo.wlock()
1296 1296 try:
1297 1297 for f in list:
1298 1298 if self._repo.dirstate[f] != 'r':
1299 1299 self._repo.ui.warn(_("%s not removed!\n") % f)
1300 1300 else:
1301 1301 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1302 1302 t = fctx.data()
1303 1303 self._repo.wwrite(f, t, fctx.flags())
1304 1304 self._repo.dirstate.normal(f)
1305 1305 finally:
1306 1306 wlock.release()
1307 1307
1308 1308 def copy(self, source, dest):
1309 1309 try:
1310 1310 st = self._repo.wvfs.lstat(dest)
1311 1311 except OSError, err:
1312 1312 if err.errno != errno.ENOENT:
1313 1313 raise
1314 1314 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1315 1315 return
1316 1316 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1317 1317 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1318 1318 "symbolic link\n") % dest)
1319 1319 else:
1320 1320 wlock = self._repo.wlock()
1321 1321 try:
1322 1322 if self._repo.dirstate[dest] in '?r':
1323 1323 self._repo.dirstate.add(dest)
1324 1324 self._repo.dirstate.copy(source, dest)
1325 1325 finally:
1326 1326 wlock.release()
1327 1327
1328 1328 def _filtersuspectsymlink(self, files):
1329 1329 if not files or self._repo.dirstate._checklink:
1330 1330 return files
1331 1331
1332 1332 # Symlink placeholders may get non-symlink-like contents
1333 1333 # via user error or dereferencing by NFS or Samba servers,
1334 1334 # so we filter out any placeholders that don't look like a
1335 1335 # symlink
1336 1336 sane = []
1337 1337 for f in files:
1338 1338 if self.flags(f) == 'l':
1339 1339 d = self[f].data()
1340 1340 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1341 1341 self._repo.ui.debug('ignoring suspect symlink placeholder'
1342 1342 ' "%s"\n' % f)
1343 1343 continue
1344 1344 sane.append(f)
1345 1345 return sane
1346 1346
1347 1347 def _checklookup(self, files):
1348 1348 # check for any possibly clean files
1349 1349 if not files:
1350 1350 return [], []
1351 1351
1352 1352 modified = []
1353 1353 fixup = []
1354 1354 pctx = self._parents[0]
1355 1355 # do a full compare of any files that might have changed
1356 1356 for f in sorted(files):
1357 1357 if (f not in pctx or self.flags(f) != pctx.flags(f)
1358 1358 or pctx[f].cmp(self[f])):
1359 1359 modified.append(f)
1360 1360 else:
1361 1361 fixup.append(f)
1362 1362
1363 1363 # update dirstate for files that are actually clean
1364 1364 if fixup:
1365 1365 try:
1366 1366 # updating the dirstate is optional
1367 1367 # so we don't wait on the lock
1368 1368 # wlock can invalidate the dirstate, so cache normal _after_
1369 1369 # taking the lock
1370 1370 wlock = self._repo.wlock(False)
1371 1371 normal = self._repo.dirstate.normal
1372 1372 try:
1373 1373 for f in fixup:
1374 1374 normal(f)
1375 1375 finally:
1376 1376 wlock.release()
1377 1377 except error.LockError:
1378 1378 pass
1379 1379 return modified, fixup
1380 1380
1381 1381 def _manifestmatches(self, match, s):
1382 1382 """Slow path for workingctx
1383 1383
1384 1384 The fast path is when we compare the working directory to its parent
1385 1385 which means this function is comparing with a non-parent; therefore we
1386 1386 need to build a manifest and return what matches.
1387 1387 """
1388 1388 mf = self._repo['.']._manifestmatches(match, s)
1389 1389 modified, added, removed = s[0:3]
1390 1390 for f in modified + added:
1391 1391 mf[f] = None
1392 mf.set(f, self.flags(f))
1392 mf.setflag(f, self.flags(f))
1393 1393 for f in removed:
1394 1394 if f in mf:
1395 1395 del mf[f]
1396 1396 return mf
1397 1397
1398 1398 def _prestatus(self, other, s, match, listignored, listclean, listunknown):
1399 1399 """override the parent hook with a dirstate query
1400 1400
1401 1401 We use this prestatus hook to populate the status with information from
1402 1402 the dirstate.
1403 1403 """
1404 1404 # doesn't need to call super; if that changes, be aware that super
1405 1405 # calls self.manifest which would slow down the common case of calling
1406 1406 # status against a workingctx's parent
1407 1407 return self._dirstatestatus(match, listignored, listclean, listunknown)
1408 1408
1409 1409 def _poststatus(self, other, s, match, listignored, listclean, listunknown):
1410 1410 """override the parent hook with a filter for suspect symlinks
1411 1411
1412 1412 We use this poststatus hook to filter out symlinks that might have
1413 1413 accidentally ended up with the entire contents of the file they are
1414 1414 susposed to be linking to.
1415 1415 """
1416 1416 s[0] = self._filtersuspectsymlink(s[0])
1417 1417 self._status = scmutil.status(*s)
1418 1418 return s
1419 1419
1420 1420 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1421 1421 unknown=False):
1422 1422 '''Gets the status from the dirstate -- internal use only.'''
1423 1423 listignored, listclean, listunknown = ignored, clean, unknown
1424 1424 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1425 1425 subrepos = []
1426 1426 if '.hgsub' in self:
1427 1427 subrepos = sorted(self.substate)
1428 1428 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1429 1429 listclean, listunknown)
1430 1430 modified, added, removed, deleted, unknown, ignored, clean = s
1431 1431
1432 1432 # check for any possibly clean files
1433 1433 if cmp:
1434 1434 modified2, fixup = self._checklookup(cmp)
1435 1435 modified += modified2
1436 1436
1437 1437 # update dirstate for files that are actually clean
1438 1438 if fixup and listclean:
1439 1439 clean += fixup
1440 1440
1441 1441 return [modified, added, removed, deleted, unknown, ignored, clean]
1442 1442
1443 1443 def _buildstatus(self, other, s, match, listignored, listclean,
1444 1444 listunknown):
1445 1445 """build a status with respect to another context
1446 1446
1447 1447 This includes logic for maintaining the fast path of status when
1448 1448 comparing the working directory against its parent, which is to skip
1449 1449 building a new manifest if self (working directory) is not comparing
1450 1450 against its parent (repo['.']).
1451 1451 """
1452 1452 if other != self._repo['.']:
1453 1453 s = super(workingctx, self)._buildstatus(other, s, match,
1454 1454 listignored, listclean,
1455 1455 listunknown)
1456 1456 return s
1457 1457
1458 1458 def _matchstatus(self, other, s, match, listignored, listclean,
1459 1459 listunknown):
1460 1460 """override the match method with a filter for directory patterns
1461 1461
1462 1462 We use inheritance to customize the match.bad method only in cases of
1463 1463 workingctx since it belongs only to the working directory when
1464 1464 comparing against the parent changeset.
1465 1465
1466 1466 If we aren't comparing against the working directory's parent, then we
1467 1467 just use the default match object sent to us.
1468 1468 """
1469 1469 superself = super(workingctx, self)
1470 1470 match = superself._matchstatus(other, s, match, listignored, listclean,
1471 1471 listunknown)
1472 1472 if other != self._repo['.']:
1473 1473 def bad(f, msg):
1474 1474 # 'f' may be a directory pattern from 'match.files()',
1475 1475 # so 'f not in ctx1' is not enough
1476 1476 if f not in other and f not in other.dirs():
1477 1477 self._repo.ui.warn('%s: %s\n' %
1478 1478 (self._repo.dirstate.pathto(f), msg))
1479 1479 match.bad = bad
1480 1480 return match
1481 1481
1482 1482 def status(self, other='.', match=None, listignored=False,
1483 1483 listclean=False, listunknown=False, listsubrepos=False):
1484 1484 # yet to be determined: what to do if 'other' is a 'workingctx' or a
1485 1485 # 'memctx'?
1486 1486 s = super(workingctx, self).status(other, match, listignored, listclean,
1487 1487 listunknown, listsubrepos)
1488 1488 # calling 'super' subtly reveresed the contexts, so we flip the results
1489 1489 # (s[1] is 'added' and s[2] is 'removed')
1490 1490 s = list(s)
1491 1491 s[1], s[2] = s[2], s[1]
1492 1492 return scmutil.status(*s)
1493 1493
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        # filelog/changectx are optional; propertycaches fill them in lazily
        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # an uncommitted file context always "exists"
        return True
1510 1510
1511 1511 def parents(self):
1512 1512 '''return parent filectxs, following copies if necessary'''
1513 1513 def filenode(ctx, path):
1514 1514 return ctx._manifest.get(path, nullid)
1515 1515
1516 1516 path = self._path
1517 1517 fl = self._filelog
1518 1518 pcl = self._changectx._parents
1519 1519 renamed = self.renamed()
1520 1520
1521 1521 if renamed:
1522 1522 pl = [renamed + (None,)]
1523 1523 else:
1524 1524 pl = [(path, filenode(pcl[0], path), fl)]
1525 1525
1526 1526 for pc in pcl[1:]:
1527 1527 pl.append((path, filenode(pc, path), fl))
1528 1528
1529 1529 return [filectx(self._repo, p, fileid=n, filelog=l)
1530 1530 for p, n, l in pl if n != nullid]
1531 1531
1532 1532 def children(self):
1533 1533 return []
1534 1534
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)

    def renamed(self):
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def date(self):
        t, tz = self._changectx.date()
        try:
            return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # file is gone; fall back to the changectx date
            return (t, tz)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags)
1580 1580
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn raises an
    IOError. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing
    # files. Extensions that need to retain compatibility across Mercurial
    # 3.1 can use this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._status = scmutil.status(files, [], [], [], [], [], [])
        self._filectxfn = filectxfn
        self.substate = {}

        # if store is not callable, wrap it in a function
        if not callable(filectxfn):
            def getfilectx(repo, memctx, path):
                fctx = filectxfn[path]
                # this is weird but apparently we only keep track of one
                # parent (why not only store that instead of a tuple?)
                copied = fctx.renamed()
                if copied:
                    copied = copied[0]
                return memfilectx(repo, path, fctx.data(),
                                  islink=fctx.islink(), isexec=fctx.isexec(),
                                  copied=copied, memctx=memctx)
            self._filectxfn = getfilectx

        self._extra = extra and extra.copy() or {}
        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)
1648 1648
1649 1649 def filectx(self, path, filelog=None):
1650 1650 """get a file context from the working directory
1651 1651
1652 1652 Returns None if file doesn't exist and should be removed."""
1653 1653 return self._filectxfn(self._repo, self, path)
1654 1654
1655 1655 def commit(self):
1656 1656 """commit context to the repo"""
1657 1657 return self._repo.commitctx(self)
1658 1658
1659 1659 @propertycache
1660 1660 def _manifest(self):
1661 1661 """generate a manifest based on the return values of filectxfn"""
1662 1662
1663 1663 # keep this simple for now; just worry about p1
1664 1664 pctx = self._parents[0]
1665 1665 man = pctx.manifest().copy()
1666 1666
1667 1667 for f, fnode in man.iteritems():
1668 1668 p1node = nullid
1669 1669 p2node = nullid
1670 1670 p = pctx[f].parents() # if file isn't in pctx, check p2?
1671 1671 if len(p) > 0:
1672 1672 p1node = p[0].node()
1673 1673 if len(p) > 1:
1674 1674 p2node = p[1].node()
1675 1675 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1676 1676
1677 1677 return man
1678 1678
1679 1679
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def data(self):
        return self._data

    def size(self):
        return len(self.data())

    def flags(self):
        return self._flags

    def renamed(self):
        return self._copied

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data
@@ -1,1790 +1,1790 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 import branchmap, pathutil
21 21 propertycache = util.propertycache
22 22 filecache = scmutil.filecache
23 23
class repofilecache(filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    # Descriptor protocol overrides: every access is redirected to the
    # unfiltered repo so the cached value lives on (and is invalidated
    # against) the single unfiltered instance, not each filtered view.
    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())
34 34
class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        # resolve cache file names relative to .hg/store instead of .hg
        return obj.sjoin(fname)
39 39
class unfilteredpropertycache(propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            # accessed on the unfiltered repo itself: compute and cache there
            return super(unfilteredpropertycache, self).__get__(unfi)
        # accessed through a filtered view: delegate to the unfiltered repo's
        # attribute so the value is computed/cached exactly once
        return getattr(unfi, self.name)
48 48
class filteredpropertycache(propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # store directly on the instance that was accessed (possibly a
        # filtered view), so each view keeps its own cached value
        object.__setattr__(obj, self.name, value)
54 54
55 55
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    # a cached property materializes as an instance attribute, so probing
    # the unfiltered repo's instance dict tells us whether it was computed
    unfi = repo.unfiltered()
    return name in vars(unfi)
59 59
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        # swap the (possibly filtered) receiver for its unfiltered twin
        unfi = repo.unfiltered()
        return orig(unfi, *args, **kwargs)
    return wrapper
65 65
# wire capabilities advertised by modern local peers; legacy peers add the
# pre-getbundle 'changegroupsubset' command on top of these
moderncaps = set(['lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'])
legacycaps = moderncaps.union(['changegroupsubset'])
69 69
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        # peers always see the 'served' filtered view of the repository
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        # a local peer exposes the underlying repo object directly
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  format='HG10', **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG2X' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.unbundle20(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            cg = exchange.readbundle(self.ui, cg, None)
            ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
            if util.safehasattr(ret, 'getchunks'):
                # This is a bundle20 object, turn it into an unbundler.
                # This little dance should be dropped eventually when the API
                # is finally improved.
                stream = util.chunkbuffer(ret.getchunks())
                ret = bundle2.unbundle20(self.ui, stream)
            return ret
        except error.PushRaced, exc:
            # surface a push race as a wire-style response error
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
154 154
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        # advertise the legacy capability set (includes changegroupsubset)
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
173 173
174 174 class localrepository(object):
175 175
    # on-disk store formats this class can read and write
    supportedformats = set(('revlogv1', 'generaldelta'))
    # full set of requirements this class supports (store-level + repo-level)
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    # requirements forwarded to the store opener as options
    openerreqs = set(('revlogv1', 'generaldelta'))
    # requirements written for newly-created repositories
    requirements = ['revlogv1']
    # name of the active repoview filter; None means unfiltered
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        # copy so callers can append without mutating the class attribute
        return self.requirements[:]
189 189
    def __init__(self, baseui, path=None, create=False):
        """Open (or create, if create=True) the repository at path.

        Raises error.RepoError if the repo is missing (without create) or
        already exists (with create)."""
        # working directory vfs and legacy alias
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        # .hg vfs and legacy alias
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # a missing .hg/hgrc is fine
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                # only run hooks belonging to currently enabled extensions
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                # build a fresh repository layout and its requirements
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # existing repo: read and validate its requirements file
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        # honor .hg/sharedpath redirection (hg share)
        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        # store vfs and legacy aliases
        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}
299 299
    def close(self):
        # nothing to release for a plain local repository
        pass

    def _restrictcapabilities(self, caps):
        """Return the capability set actually advertised to peers."""
        # bundle2 is not ready for prime time, drop it unless explicitly
        # required by the tests (or some brave tester)
        if self.ui.configbool('experimental', 'bundle2-exp', False):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2-exp=' + urllib.quote(capsblob))
        return caps

    def _applyrequirements(self, requirements):
        """Record requirements and translate them into store opener options."""
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.sopener.options['chunkcachesize'] = chunkcachesize

    def _writerequirements(self):
        # persist self.requirements to .hg/requires, one per line, sorted
        reqfile = self.opener("requires", "w")
        for r in sorted(self.requirements):
            reqfile.write("%s\n" % r)
        reqfile.close()
325 325
    def _checknested(self, path):
        """Determine if path is a legal nested repository.

        path is an absolute filesystem path; returns a boolean. Used as the
        pathauditor callback installed in __init__."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    # path is itself a declared subrepo: legal
                    return True
                else:
                    # path is inside a subrepo; delegate the decision to it
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                # walk outward one directory level and retry
                parts.pop()
        return False
363 363
    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)
380 380
    @repofilecache('bookmarks')
    def _bookmarks(self):
        # bookmark name -> node mapping, invalidated when .hg/bookmarks changes
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        # name of the active bookmark, or None
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        """Return the nodes of all divergent variants of a bookmark.

        Variants share the part before '@' (e.g. 'foo' matches 'foo@remote')."""
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        # phase data, invalidated when .hg/store/phaseroots changes
        return phases.phasecache(self, self._phasedefaults)
400 400
401 401 @storecache('obsstore')
402 402 def obsstore(self):
403 403 # read default format for new obsstore.
404 404 defaultformat = self.ui.configint('format', 'obsstore-version', None)
405 405 # rely on obsstore class default when possible.
406 406 kwargs = {}
407 407 if defaultformat is not None:
408 408 defaultformat['defaultformat'] = defaultformat
409 409 store = obsolete.obsstore(self.sopener, **kwargs)
410 410 if store and not obsolete._enabled:
411 411 # message is rare enough to not be translated
412 412 msg = 'obsolete feature not enabled but %i markers found!\n'
413 413 self.ui.warn(msg % len(list(store)))
414 414 return store
415 415
    @storecache('00changelog.i')
    def changelog(self):
        # the revision DAG; picks up uncommitted-transaction data when the
        # HG_PENDING environment variable points inside this repo (hooks)
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        # the manifest revlog, invalidated when 00manifest.i changes
        return manifest.manifest(self.sopener)

    @repofilecache('dirstate')
    def dirstate(self):
        # mutable-int-in-a-list so the nested function can flip the flag
        # (Python 2 has no 'nonlocal')
        warned = [0]
        def validate(node):
            # map unknown working-directory parents to nullid, warning once
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)
444 444
    def __getitem__(self, changeid):
        # repo[None] is the working directory; anything else is a changeset
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        # True if changeid resolves to a changeset in this repo
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        # a repo object is always truthy, even when empty
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        # iterate revision numbers in changelog order
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self, revset.spanset(self))

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
490 490
491 491 @unfilteredmethod
492 492 def _tag(self, names, node, message, local, user, date, extra={},
493 493 editor=False):
494 494 if isinstance(names, str):
495 495 names = (names,)
496 496
497 497 branches = self.branchmap()
498 498 for name in names:
499 499 self.hook('pretag', throw=True, node=hex(node), tag=name,
500 500 local=local)
501 501 if name in branches:
502 502 self.ui.warn(_("warning: tag %s conflicts with existing"
503 503 " branch name\n") % name)
504 504
505 505 def writetags(fp, names, munge, prevtags):
506 506 fp.seek(0, 2)
507 507 if prevtags and prevtags[-1] != '\n':
508 508 fp.write('\n')
509 509 for name in names:
510 510 m = munge and munge(name) or name
511 511 if (self._tagscache.tagtypes and
512 512 name in self._tagscache.tagtypes):
513 513 old = self.tags().get(name, nullid)
514 514 fp.write('%s %s\n' % (hex(old), m))
515 515 fp.write('%s %s\n' % (hex(node), m))
516 516 fp.close()
517 517
518 518 prevtags = ''
519 519 if local:
520 520 try:
521 521 fp = self.opener('localtags', 'r+')
522 522 except IOError:
523 523 fp = self.opener('localtags', 'a')
524 524 else:
525 525 prevtags = fp.read()
526 526
527 527 # local tags are stored in the current charset
528 528 writetags(fp, names, None, prevtags)
529 529 for name in names:
530 530 self.hook('tag', node=hex(node), tag=name, local=local)
531 531 return
532 532
533 533 try:
534 534 fp = self.wfile('.hgtags', 'rb+')
535 535 except IOError, e:
536 536 if e.errno != errno.ENOENT:
537 537 raise
538 538 fp = self.wfile('.hgtags', 'ab')
539 539 else:
540 540 prevtags = fp.read()
541 541
542 542 # committed tags are stored in UTF-8
543 543 writetags(fp, names, encoding.fromlocal, prevtags)
544 544
545 545 fp.close()
546 546
547 547 self.invalidatecaches()
548 548
549 549 if '.hgtags' not in self.dirstate:
550 550 self[None].add(['.hgtags'])
551 551
552 552 m = matchmod.exact(self.root, '', ['.hgtags'])
553 553 tagnode = self.commit(message, user, date, extra=extra, match=m,
554 554 editor=editor)
555 555
556 556 for name in names:
557 557 self.hook('tag', node=hex(node), tag=name, local=local)
558 558
559 559 return tagnode
560 560
    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # refuse to commit over a locally modified .hgtags
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if util.any(self.status(match=m, unknown=True, ignored=True)):
                raise util.Abort(_('working copy of .hgtags is changed'),
                                 hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)
590 590
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # derived caches, filled lazily by tagslist()/nodetags()
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
613 613
614 614 def tags(self):
615 615 '''return a mapping of tag to node'''
616 616 t = {}
617 617 if self.changelog.filteredrevs:
618 618 tags, tt = self._findtags()
619 619 else:
620 620 tags = self._tagscache.tags
621 621 for k, v in tags.iteritems():
622 622 try:
623 623 # ignore tags to unknown nodes
624 624 self.changelog.rev(v)
625 625 t[k] = v
626 626 except (error.LookupError, ValueError):
627 627 pass
628 628 return t
629 629
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            # a nullid node marks a deleted tag; skip it
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
662 662
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            # sort by revision, then drop the revision from the result
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            # invert the tag->node mapping once and cache it
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        # return the sorted list of bookmark names pointing at node
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)
701 701
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)

    def lookup(self, key):
        # resolve any changeid (rev, node, tag, bookmark, ...) to a node
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        # resolve key to a branch name, preferring an exact branch match on
        # the remote (or self) before falling back to the changeset's branch
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        # for each node, report whether it is known and not secret
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result
735 735
    def local(self):
        return self

    def cancopy(self):
        """Can this repository be safely copied with a hardlink clone?"""
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.ui.configbool('phases', 'publish', True):
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def join(self, f, *insidef):
        # path of f inside .hg
        return os.path.join(self.path, f, *insidef)

    def wjoin(self, f, *insidef):
        # path of f inside the working directory
        return os.path.join(self.root, f, *insidef)

    def file(self, f):
        # return the filelog for tracked file f ('/'-prefix tolerated)
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        # method form of repo[changeid]
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()
765 765
    def setparents(self, p1, p2=nullid):
        """Set the working directory parents, fixing up copy records."""
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            # dropping the second parent: discard copy records whose
            # source and destination both vanished from p1
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()
782 782
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        # repo-relative path f rendered relative to cwd
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        # open file f in the working directory
        return self.wopener(f, mode)

    def _link(self, f):
        # True if working-directory file f is a symlink
        return self.wvfs.islink(f)
799 799
    def _loadfilter(self, filter):
        """Load and cache the (matcher, fn, params) list for a filter
        config section ('encode' or 'decode')."""
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables the pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        # registered in-process filter takes precedence over
                        # spawning a shell command
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    # bind via default arg so each wrapper keeps its own fn
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
823 823
    def _filter(self, filterpats, filename, data):
        # run data through the first filter whose pattern matches filename
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        # filters applied when reading from the working directory
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        # filters applied when writing to the working directory
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        # register an in-process data filter usable from [encode]/[decode]
        self._datafilters[name] = filter
843 843
    def wread(self, filename):
        """Read a working-directory file, applying encode filters.

        Symlinks yield their target string rather than file content."""
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        """Write data to the working directory, applying decode filters.

        flags: 'l' writes a symlink, 'x' sets the executable bit."""
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)

    def wwritedata(self, filename, data):
        # apply decode filters without touching the filesystem
        return self._filter(self._decodefilterpats, filename, data)
862 862
    def transaction(self, desc, report=None):
        """Open (or nest into) a store transaction.

        desc is a human-readable description recorded in journal.desc;
        report overrides ui.warn as the failure reporter. Only a weak
        reference to the transaction is kept, so dropping the returned
        object aborts it."""
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            # nest inside the already-running transaction
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        def onclose():
            # flush fncache et al. when the transaction commits
            self.store.write(self._transref())

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        rp = report and report or self.ui.warn
        tr = transaction.transaction(rp, self.sopener,
                                     "journal",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     onclose)
        self._transref = weakref.ref(tr)
        return tr
887 887
    def _journalfiles(self):
        # (vfs, name) pairs of every file participating in a transaction
        # journal; note journal/journal.phaseroots live in the store vfs
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        # the undo.* counterparts of the journal files, for rollback
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        # snapshot the non-store state files so rollback can restore them
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))
910 910
911 911 def recover(self):
912 912 lock = self.lock()
913 913 try:
914 914 if self.svfs.exists("journal"):
915 915 self.ui.status(_("rolling back interrupted transaction\n"))
916 916 transaction.rollback(self.sopener, "journal",
917 917 self.ui.warn)
918 918 self.invalidate()
919 919 return True
920 920 else:
921 921 self.ui.warn(_("no interrupted transaction available\n"))
922 922 return False
923 923 finally:
924 924 lock.release()
925 925
926 926 def rollback(self, dryrun=False, force=False):
927 927 wlock = lock = None
928 928 try:
929 929 wlock = self.wlock()
930 930 lock = self.lock()
931 931 if self.svfs.exists("undo"):
932 932 return self._rollback(dryrun, force)
933 933 else:
934 934 self.ui.warn(_("no rollback information available\n"))
935 935 return 1
936 936 finally:
937 937 release(lock, wlock)
938 938
939 939 @unfilteredmethod # Until we get smarter cache management
940 940 def _rollback(self, dryrun, force):
941 941 ui = self.ui
942 942 try:
943 943 args = self.opener.read('undo.desc').splitlines()
944 944 (oldlen, desc, detail) = (int(args[0]), args[1], None)
945 945 if len(args) >= 3:
946 946 detail = args[2]
947 947 oldtip = oldlen - 1
948 948
949 949 if detail and ui.verbose:
950 950 msg = (_('repository tip rolled back to revision %s'
951 951 ' (undo %s: %s)\n')
952 952 % (oldtip, desc, detail))
953 953 else:
954 954 msg = (_('repository tip rolled back to revision %s'
955 955 ' (undo %s)\n')
956 956 % (oldtip, desc))
957 957 except IOError:
958 958 msg = _('rolling back unknown transaction\n')
959 959 desc = None
960 960
961 961 if not force and self['.'] != self['tip'] and desc == 'commit':
962 962 raise util.Abort(
963 963 _('rollback of last commit while not checked out '
964 964 'may lose data'), hint=_('use -f to force'))
965 965
966 966 ui.status(msg)
967 967 if dryrun:
968 968 return 0
969 969
970 970 parents = self.dirstate.parents()
971 971 self.destroying()
972 972 transaction.rollback(self.sopener, 'undo', ui.warn)
973 973 if self.vfs.exists('undo.bookmarks'):
974 974 self.vfs.rename('undo.bookmarks', 'bookmarks')
975 975 if self.svfs.exists('undo.phaseroots'):
976 976 self.svfs.rename('undo.phaseroots', 'phaseroots')
977 977 self.invalidate()
978 978
979 979 parentgone = (parents[0] not in self.changelog.nodemap or
980 980 parents[1] not in self.changelog.nodemap)
981 981 if parentgone:
982 982 self.vfs.rename('undo.dirstate', 'dirstate')
983 983 try:
984 984 branch = self.opener.read('undo.branch')
985 985 self.dirstate.setbranch(encoding.tolocal(branch))
986 986 except IOError:
987 987 ui.warn(_('named branch could not be reset: '
988 988 'current branch is still \'%s\'\n')
989 989 % self.dirstate.branch())
990 990
991 991 self.dirstate.invalidate()
992 992 parents = tuple([p.rev() for p in self.parents()])
993 993 if len(parents) > 1:
994 994 ui.status(_('working directory now based on '
995 995 'revisions %d and %d\n') % parents)
996 996 else:
997 997 ui.status(_('working directory now based on '
998 998 'revision %d\n') % parents)
999 999 # TODO: if we know which new heads may result from this rollback, pass
1000 1000 # them to destroy(), which will prevent the branchhead cache from being
1001 1001 # invalidated.
1002 1002 self.destroyed()
1003 1003 return 0
1004 1004
1005 1005 def invalidatecaches(self):
1006 1006
1007 1007 if '_tagscache' in vars(self):
1008 1008 # can't use delattr on proxy
1009 1009 del self.__dict__['_tagscache']
1010 1010
1011 1011 self.unfiltered()._branchcaches.clear()
1012 1012 self.invalidatevolatilesets()
1013 1013
1014 1014 def invalidatevolatilesets(self):
1015 1015 self.filteredrevcache.clear()
1016 1016 obsolete.clearobscaches(self)
1017 1017
1018 1018 def invalidatedirstate(self):
1019 1019 '''Invalidates the dirstate, causing the next call to dirstate
1020 1020 to check if it was modified since the last time it was read,
1021 1021 rereading it if it has.
1022 1022
1023 1023 This is different to dirstate.invalidate() that it doesn't always
1024 1024 rereads the dirstate. Use dirstate.invalidate() if you want to
1025 1025 explicitly read the dirstate again (i.e. restoring it to a previous
1026 1026 known good state).'''
1027 1027 if hasunfilteredcache(self, 'dirstate'):
1028 1028 for k in self.dirstate._filecache:
1029 1029 try:
1030 1030 delattr(self.dirstate, k)
1031 1031 except AttributeError:
1032 1032 pass
1033 1033 delattr(self.unfiltered(), 'dirstate')
1034 1034
1035 1035 def invalidate(self):
1036 1036 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1037 1037 for k in self._filecache:
1038 1038 # dirstate is invalidated separately in invalidatedirstate()
1039 1039 if k == 'dirstate':
1040 1040 continue
1041 1041
1042 1042 try:
1043 1043 delattr(unfiltered, k)
1044 1044 except AttributeError:
1045 1045 pass
1046 1046 self.invalidatecaches()
1047 1047 self.store.invalidatecaches()
1048 1048
1049 1049 def invalidateall(self):
1050 1050 '''Fully invalidates both store and non-store parts, causing the
1051 1051 subsequent operation to reread any outside changes.'''
1052 1052 # extension should hook this to invalidate its caches
1053 1053 self.invalidate()
1054 1054 self.invalidatedirstate()
1055 1055
1056 1056 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1057 1057 try:
1058 1058 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1059 1059 except error.LockHeld, inst:
1060 1060 if not wait:
1061 1061 raise
1062 1062 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1063 1063 (desc, inst.locker))
1064 1064 # default to 600 seconds timeout
1065 1065 l = lockmod.lock(vfs, lockname,
1066 1066 int(self.ui.config("ui", "timeout", "600")),
1067 1067 releasefn, desc=desc)
1068 1068 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1069 1069 if acquirefn:
1070 1070 acquirefn()
1071 1071 return l
1072 1072
1073 1073 def _afterlock(self, callback):
1074 1074 """add a callback to the current repository lock.
1075 1075
1076 1076 The callback will be executed on lock release."""
1077 1077 l = self._lockref and self._lockref()
1078 1078 if l:
1079 1079 l.postrelease.append(callback)
1080 1080 else:
1081 1081 callback()
1082 1082
1083 1083 def lock(self, wait=True):
1084 1084 '''Lock the repository store (.hg/store) and return a weak reference
1085 1085 to the lock. Use this before modifying the store (e.g. committing or
1086 1086 stripping). If you are opening a transaction, get a lock as well.)'''
1087 1087 l = self._lockref and self._lockref()
1088 1088 if l is not None and l.held:
1089 1089 l.lock()
1090 1090 return l
1091 1091
1092 1092 def unlock():
1093 1093 for k, ce in self._filecache.items():
1094 1094 if k == 'dirstate' or k not in self.__dict__:
1095 1095 continue
1096 1096 ce.refresh()
1097 1097
1098 1098 l = self._lock(self.svfs, "lock", wait, unlock,
1099 1099 self.invalidate, _('repository %s') % self.origroot)
1100 1100 self._lockref = weakref.ref(l)
1101 1101 return l
1102 1102
1103 1103 def wlock(self, wait=True):
1104 1104 '''Lock the non-store parts of the repository (everything under
1105 1105 .hg except .hg/store) and return a weak reference to the lock.
1106 1106 Use this before modifying files in .hg.'''
1107 1107 l = self._wlockref and self._wlockref()
1108 1108 if l is not None and l.held:
1109 1109 l.lock()
1110 1110 return l
1111 1111
1112 1112 def unlock():
1113 1113 if self.dirstate.pendingparentchange():
1114 1114 self.dirstate.invalidate()
1115 1115 else:
1116 1116 self.dirstate.write()
1117 1117
1118 1118 self._filecache['dirstate'].refresh()
1119 1119
1120 1120 l = self._lock(self.vfs, "wlock", wait, unlock,
1121 1121 self.invalidatedirstate, _('working directory of %s') %
1122 1122 self.origroot)
1123 1123 self._wlockref = weakref.ref(l)
1124 1124 return l
1125 1125
1126 1126 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1127 1127 """
1128 1128 commit an individual file as part of a larger transaction
1129 1129 """
1130 1130
1131 1131 fname = fctx.path()
1132 1132 text = fctx.data()
1133 1133 flog = self.file(fname)
1134 1134 fparent1 = manifest1.get(fname, nullid)
1135 1135 fparent2 = manifest2.get(fname, nullid)
1136 1136
1137 1137 meta = {}
1138 1138 copy = fctx.renamed()
1139 1139 if copy and copy[0] != fname:
1140 1140 # Mark the new revision of this file as a copy of another
1141 1141 # file. This copy data will effectively act as a parent
1142 1142 # of this new revision. If this is a merge, the first
1143 1143 # parent will be the nullid (meaning "look up the copy data")
1144 1144 # and the second one will be the other parent. For example:
1145 1145 #
1146 1146 # 0 --- 1 --- 3 rev1 changes file foo
1147 1147 # \ / rev2 renames foo to bar and changes it
1148 1148 # \- 2 -/ rev3 should have bar with all changes and
1149 1149 # should record that bar descends from
1150 1150 # bar in rev2 and foo in rev1
1151 1151 #
1152 1152 # this allows this merge to succeed:
1153 1153 #
1154 1154 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1155 1155 # \ / merging rev3 and rev4 should use bar@rev2
1156 1156 # \- 2 --- 4 as the merge base
1157 1157 #
1158 1158
1159 1159 cfname = copy[0]
1160 1160 crev = manifest1.get(cfname)
1161 1161 newfparent = fparent2
1162 1162
1163 1163 if manifest2: # branch merge
1164 1164 if fparent2 == nullid or crev is None: # copied on remote side
1165 1165 if cfname in manifest2:
1166 1166 crev = manifest2[cfname]
1167 1167 newfparent = fparent1
1168 1168
1169 1169 # find source in nearest ancestor if we've lost track
1170 1170 if not crev:
1171 1171 self.ui.debug(" %s: searching for copy revision for %s\n" %
1172 1172 (fname, cfname))
1173 1173 for ancestor in self[None].ancestors():
1174 1174 if cfname in ancestor:
1175 1175 crev = ancestor[cfname].filenode()
1176 1176 break
1177 1177
1178 1178 if crev:
1179 1179 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1180 1180 meta["copy"] = cfname
1181 1181 meta["copyrev"] = hex(crev)
1182 1182 fparent1, fparent2 = nullid, newfparent
1183 1183 else:
1184 1184 self.ui.warn(_("warning: can't find ancestor for '%s' "
1185 1185 "copied from '%s'!\n") % (fname, cfname))
1186 1186
1187 1187 elif fparent1 == nullid:
1188 1188 fparent1, fparent2 = fparent2, nullid
1189 1189 elif fparent2 != nullid:
1190 1190 # is one parent an ancestor of the other?
1191 1191 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1192 1192 if fparent1 in fparentancestors:
1193 1193 fparent1, fparent2 = fparent2, nullid
1194 1194 elif fparent2 in fparentancestors:
1195 1195 fparent2 = nullid
1196 1196
1197 1197 # is the file changed?
1198 1198 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1199 1199 changelist.append(fname)
1200 1200 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1201 1201 # are just the flags changed during merge?
1202 1202 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1203 1203 changelist.append(fname)
1204 1204
1205 1205 return fparent1
1206 1206
1207 1207 @unfilteredmethod
1208 1208 def commit(self, text="", user=None, date=None, match=None, force=False,
1209 1209 editor=False, extra={}):
1210 1210 """Add a new revision to current repository.
1211 1211
1212 1212 Revision information is gathered from the working directory,
1213 1213 match can be used to filter the committed files. If editor is
1214 1214 supplied, it is called to get a commit message.
1215 1215 """
1216 1216
1217 1217 def fail(f, msg):
1218 1218 raise util.Abort('%s: %s' % (f, msg))
1219 1219
1220 1220 if not match:
1221 1221 match = matchmod.always(self.root, '')
1222 1222
1223 1223 if not force:
1224 1224 vdirs = []
1225 1225 match.explicitdir = vdirs.append
1226 1226 match.bad = fail
1227 1227
1228 1228 wlock = self.wlock()
1229 1229 try:
1230 1230 wctx = self[None]
1231 1231 merge = len(wctx.parents()) > 1
1232 1232
1233 1233 if (not force and merge and match and
1234 1234 (match.files() or match.anypats())):
1235 1235 raise util.Abort(_('cannot partially commit a merge '
1236 1236 '(do not specify files or patterns)'))
1237 1237
1238 1238 status = self.status(match=match, clean=force)
1239 1239 if force:
1240 1240 status.modified.extend(status.clean) # mq may commit clean files
1241 1241
1242 1242 # check subrepos
1243 1243 subs = []
1244 1244 commitsubs = set()
1245 1245 newstate = wctx.substate.copy()
1246 1246 # only manage subrepos and .hgsubstate if .hgsub is present
1247 1247 if '.hgsub' in wctx:
1248 1248 # we'll decide whether to track this ourselves, thanks
1249 1249 for c in status.modified, status.added, status.removed:
1250 1250 if '.hgsubstate' in c:
1251 1251 c.remove('.hgsubstate')
1252 1252
1253 1253 # compare current state to last committed state
1254 1254 # build new substate based on last committed state
1255 1255 oldstate = wctx.p1().substate
1256 1256 for s in sorted(newstate.keys()):
1257 1257 if not match(s):
1258 1258 # ignore working copy, use old state if present
1259 1259 if s in oldstate:
1260 1260 newstate[s] = oldstate[s]
1261 1261 continue
1262 1262 if not force:
1263 1263 raise util.Abort(
1264 1264 _("commit with new subrepo %s excluded") % s)
1265 1265 if wctx.sub(s).dirty(True):
1266 1266 if not self.ui.configbool('ui', 'commitsubrepos'):
1267 1267 raise util.Abort(
1268 1268 _("uncommitted changes in subrepo %s") % s,
1269 1269 hint=_("use --subrepos for recursive commit"))
1270 1270 subs.append(s)
1271 1271 commitsubs.add(s)
1272 1272 else:
1273 1273 bs = wctx.sub(s).basestate()
1274 1274 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1275 1275 if oldstate.get(s, (None, None, None))[1] != bs:
1276 1276 subs.append(s)
1277 1277
1278 1278 # check for removed subrepos
1279 1279 for p in wctx.parents():
1280 1280 r = [s for s in p.substate if s not in newstate]
1281 1281 subs += [s for s in r if match(s)]
1282 1282 if subs:
1283 1283 if (not match('.hgsub') and
1284 1284 '.hgsub' in (wctx.modified() + wctx.added())):
1285 1285 raise util.Abort(
1286 1286 _("can't commit subrepos without .hgsub"))
1287 1287 status.modified.insert(0, '.hgsubstate')
1288 1288
1289 1289 elif '.hgsub' in status.removed:
1290 1290 # clean up .hgsubstate when .hgsub is removed
1291 1291 if ('.hgsubstate' in wctx and
1292 1292 '.hgsubstate' not in (status.modified + status.added +
1293 1293 status.removed)):
1294 1294 status.removed.insert(0, '.hgsubstate')
1295 1295
1296 1296 # make sure all explicit patterns are matched
1297 1297 if not force and match.files():
1298 1298 matched = set(status.modified + status.added + status.removed)
1299 1299
1300 1300 for f in match.files():
1301 1301 f = self.dirstate.normalize(f)
1302 1302 if f == '.' or f in matched or f in wctx.substate:
1303 1303 continue
1304 1304 if f in status.deleted:
1305 1305 fail(f, _('file not found!'))
1306 1306 if f in vdirs: # visited directory
1307 1307 d = f + '/'
1308 1308 for mf in matched:
1309 1309 if mf.startswith(d):
1310 1310 break
1311 1311 else:
1312 1312 fail(f, _("no match under directory!"))
1313 1313 elif f not in self.dirstate:
1314 1314 fail(f, _("file not tracked!"))
1315 1315
1316 1316 cctx = context.workingctx(self, text, user, date, extra, status)
1317 1317
1318 1318 if (not force and not extra.get("close") and not merge
1319 1319 and not cctx.files()
1320 1320 and wctx.branch() == wctx.p1().branch()):
1321 1321 return None
1322 1322
1323 1323 if merge and cctx.deleted():
1324 1324 raise util.Abort(_("cannot commit merge with missing files"))
1325 1325
1326 1326 ms = mergemod.mergestate(self)
1327 1327 for f in status.modified:
1328 1328 if f in ms and ms[f] == 'u':
1329 1329 raise util.Abort(_("unresolved merge conflicts "
1330 1330 "(see hg help resolve)"))
1331 1331
1332 1332 if editor:
1333 1333 cctx._text = editor(self, cctx, subs)
1334 1334 edited = (text != cctx._text)
1335 1335
1336 1336 # Save commit message in case this transaction gets rolled back
1337 1337 # (e.g. by a pretxncommit hook). Leave the content alone on
1338 1338 # the assumption that the user will use the same editor again.
1339 1339 msgfn = self.savecommitmessage(cctx._text)
1340 1340
1341 1341 # commit subs and write new state
1342 1342 if subs:
1343 1343 for s in sorted(commitsubs):
1344 1344 sub = wctx.sub(s)
1345 1345 self.ui.status(_('committing subrepository %s\n') %
1346 1346 subrepo.subrelpath(sub))
1347 1347 sr = sub.commit(cctx._text, user, date)
1348 1348 newstate[s] = (newstate[s][0], sr)
1349 1349 subrepo.writestate(self, newstate)
1350 1350
1351 1351 p1, p2 = self.dirstate.parents()
1352 1352 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1353 1353 try:
1354 1354 self.hook("precommit", throw=True, parent1=hookp1,
1355 1355 parent2=hookp2)
1356 1356 ret = self.commitctx(cctx, True)
1357 1357 except: # re-raises
1358 1358 if edited:
1359 1359 self.ui.write(
1360 1360 _('note: commit message saved in %s\n') % msgfn)
1361 1361 raise
1362 1362
1363 1363 # update bookmarks, dirstate and mergestate
1364 1364 bookmarks.update(self, [p1, p2], ret)
1365 1365 cctx.markcommitted(ret)
1366 1366 ms.reset()
1367 1367 finally:
1368 1368 wlock.release()
1369 1369
1370 1370 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1371 1371 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1372 1372 self._afterlock(commithook)
1373 1373 return ret
1374 1374
1375 1375 @unfilteredmethod
1376 1376 def commitctx(self, ctx, error=False):
1377 1377 """Add a new revision to current repository.
1378 1378 Revision information is passed via the context argument.
1379 1379 """
1380 1380
1381 1381 tr = None
1382 1382 p1, p2 = ctx.p1(), ctx.p2()
1383 1383 user = ctx.user()
1384 1384
1385 1385 lock = self.lock()
1386 1386 try:
1387 1387 tr = self.transaction("commit")
1388 1388 trp = weakref.proxy(tr)
1389 1389
1390 1390 if ctx.files():
1391 1391 m1 = p1.manifest()
1392 1392 m2 = p2.manifest()
1393 1393 m = m1.copy()
1394 1394
1395 1395 # check in files
1396 1396 added = []
1397 1397 changed = []
1398 1398 removed = list(ctx.removed())
1399 1399 linkrev = len(self)
1400 1400 for f in sorted(ctx.modified() + ctx.added()):
1401 1401 self.ui.note(f + "\n")
1402 1402 try:
1403 1403 fctx = ctx[f]
1404 1404 if fctx is None:
1405 1405 removed.append(f)
1406 1406 else:
1407 1407 added.append(f)
1408 1408 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1409 1409 trp, changed)
1410 m.set(f, fctx.flags())
1410 m.setflag(f, fctx.flags())
1411 1411 except OSError, inst:
1412 1412 self.ui.warn(_("trouble committing %s!\n") % f)
1413 1413 raise
1414 1414 except IOError, inst:
1415 1415 errcode = getattr(inst, 'errno', errno.ENOENT)
1416 1416 if error or errcode and errcode != errno.ENOENT:
1417 1417 self.ui.warn(_("trouble committing %s!\n") % f)
1418 1418 raise
1419 1419
1420 1420 # update manifest
1421 1421 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1422 1422 drop = [f for f in removed if f in m]
1423 1423 for f in drop:
1424 1424 del m[f]
1425 1425 mn = self.manifest.add(m, trp, linkrev,
1426 1426 p1.manifestnode(), p2.manifestnode(),
1427 1427 added, drop)
1428 1428 files = changed + removed
1429 1429 else:
1430 1430 mn = p1.manifestnode()
1431 1431 files = []
1432 1432
1433 1433 # update changelog
1434 1434 self.changelog.delayupdate()
1435 1435 n = self.changelog.add(mn, files, ctx.description(),
1436 1436 trp, p1.node(), p2.node(),
1437 1437 user, ctx.date(), ctx.extra().copy())
1438 1438 p = lambda: self.changelog.writepending() and self.root or ""
1439 1439 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1440 1440 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1441 1441 parent2=xp2, pending=p)
1442 1442 self.changelog.finalize(trp)
1443 1443 # set the new commit is proper phase
1444 1444 targetphase = subrepo.newcommitphase(self.ui, ctx)
1445 1445 if targetphase:
1446 1446 # retract boundary do not alter parent changeset.
1447 1447 # if a parent have higher the resulting phase will
1448 1448 # be compliant anyway
1449 1449 #
1450 1450 # if minimal phase was 0 we don't need to retract anything
1451 1451 phases.retractboundary(self, tr, targetphase, [n])
1452 1452 tr.close()
1453 1453 branchmap.updatecache(self.filtered('served'))
1454 1454 return n
1455 1455 finally:
1456 1456 if tr:
1457 1457 tr.release()
1458 1458 lock.release()
1459 1459
1460 1460 @unfilteredmethod
1461 1461 def destroying(self):
1462 1462 '''Inform the repository that nodes are about to be destroyed.
1463 1463 Intended for use by strip and rollback, so there's a common
1464 1464 place for anything that has to be done before destroying history.
1465 1465
1466 1466 This is mostly useful for saving state that is in memory and waiting
1467 1467 to be flushed when the current lock is released. Because a call to
1468 1468 destroyed is imminent, the repo will be invalidated causing those
1469 1469 changes to stay in memory (waiting for the next unlock), or vanish
1470 1470 completely.
1471 1471 '''
1472 1472 # When using the same lock to commit and strip, the phasecache is left
1473 1473 # dirty after committing. Then when we strip, the repo is invalidated,
1474 1474 # causing those changes to disappear.
1475 1475 if '_phasecache' in vars(self):
1476 1476 self._phasecache.write()
1477 1477
1478 1478 @unfilteredmethod
1479 1479 def destroyed(self):
1480 1480 '''Inform the repository that nodes have been destroyed.
1481 1481 Intended for use by strip and rollback, so there's a common
1482 1482 place for anything that has to be done after destroying history.
1483 1483 '''
1484 1484 # When one tries to:
1485 1485 # 1) destroy nodes thus calling this method (e.g. strip)
1486 1486 # 2) use phasecache somewhere (e.g. commit)
1487 1487 #
1488 1488 # then 2) will fail because the phasecache contains nodes that were
1489 1489 # removed. We can either remove phasecache from the filecache,
1490 1490 # causing it to reload next time it is accessed, or simply filter
1491 1491 # the removed nodes now and write the updated cache.
1492 1492 self._phasecache.filterunknown(self)
1493 1493 self._phasecache.write()
1494 1494
1495 1495 # update the 'served' branch cache to help read only server process
1496 1496 # Thanks to branchcache collaboration this is done from the nearest
1497 1497 # filtered subset and it is expected to be fast.
1498 1498 branchmap.updatecache(self.filtered('served'))
1499 1499
1500 1500 # Ensure the persistent tag cache is updated. Doing it now
1501 1501 # means that the tag cache only has to worry about destroyed
1502 1502 # heads immediately after a strip/rollback. That in turn
1503 1503 # guarantees that "cachetip == currenttip" (comparing both rev
1504 1504 # and node) always means no nodes have been added or destroyed.
1505 1505
1506 1506 # XXX this is suboptimal when qrefresh'ing: we strip the current
1507 1507 # head, refresh the tag cache, then immediately add a new head.
1508 1508 # But I think doing it this way is necessary for the "instant
1509 1509 # tag cache retrieval" case to work.
1510 1510 self.invalidate()
1511 1511
1512 1512 def walk(self, match, node=None):
1513 1513 '''
1514 1514 walk recursively through the directory tree or a given
1515 1515 changeset, finding all files matched by the match
1516 1516 function
1517 1517 '''
1518 1518 return self[node].walk(match)
1519 1519
1520 1520 def status(self, node1='.', node2=None, match=None,
1521 1521 ignored=False, clean=False, unknown=False,
1522 1522 listsubrepos=False):
1523 1523 '''a convenience method that calls node1.status(node2)'''
1524 1524 return self[node1].status(node2, match, ignored, clean, unknown,
1525 1525 listsubrepos)
1526 1526
1527 1527 def heads(self, start=None):
1528 1528 heads = self.changelog.heads(start)
1529 1529 # sort the output in rev descending order
1530 1530 return sorted(heads, key=self.changelog.rev, reverse=True)
1531 1531
1532 1532 def branchheads(self, branch=None, start=None, closed=False):
1533 1533 '''return a (possibly filtered) list of heads for the given branch
1534 1534
1535 1535 Heads are returned in topological order, from newest to oldest.
1536 1536 If branch is None, use the dirstate branch.
1537 1537 If start is not None, return only heads reachable from start.
1538 1538 If closed is True, return heads that are marked as closed as well.
1539 1539 '''
1540 1540 if branch is None:
1541 1541 branch = self[None].branch()
1542 1542 branches = self.branchmap()
1543 1543 if branch not in branches:
1544 1544 return []
1545 1545 # the cache returns heads ordered lowest to highest
1546 1546 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1547 1547 if start is not None:
1548 1548 # filter out the heads that cannot be reached from startrev
1549 1549 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1550 1550 bheads = [h for h in bheads if h in fbheads]
1551 1551 return bheads
1552 1552
1553 1553 def branches(self, nodes):
1554 1554 if not nodes:
1555 1555 nodes = [self.changelog.tip()]
1556 1556 b = []
1557 1557 for n in nodes:
1558 1558 t = n
1559 1559 while True:
1560 1560 p = self.changelog.parents(n)
1561 1561 if p[1] != nullid or p[0] == nullid:
1562 1562 b.append((t, n, p[0], p[1]))
1563 1563 break
1564 1564 n = p[0]
1565 1565 return b
1566 1566
1567 1567 def between(self, pairs):
1568 1568 r = []
1569 1569
1570 1570 for top, bottom in pairs:
1571 1571 n, l, i = top, [], 0
1572 1572 f = 1
1573 1573
1574 1574 while n != bottom and n != nullid:
1575 1575 p = self.changelog.parents(n)[0]
1576 1576 if i == f:
1577 1577 l.append(n)
1578 1578 f = f * 2
1579 1579 n = p
1580 1580 i += 1
1581 1581
1582 1582 r.append(l)
1583 1583
1584 1584 return r
1585 1585
1586 1586 def checkpush(self, pushop):
1587 1587 """Extensions can override this function if additional checks have
1588 1588 to be performed before pushing, or call it if they override push
1589 1589 command.
1590 1590 """
1591 1591 pass
1592 1592
1593 1593 @unfilteredpropertycache
1594 1594 def prepushoutgoinghooks(self):
1595 1595 """Return util.hooks consists of "(repo, remote, outgoing)"
1596 1596 functions, which are called before pushing changesets.
1597 1597 """
1598 1598 return util.hooks()
1599 1599
1600 1600 def stream_in(self, remote, requirements):
1601 1601 lock = self.lock()
1602 1602 try:
1603 1603 # Save remote branchmap. We will use it later
1604 1604 # to speed up branchcache creation
1605 1605 rbranchmap = None
1606 1606 if remote.capable("branchmap"):
1607 1607 rbranchmap = remote.branchmap()
1608 1608
1609 1609 fp = remote.stream_out()
1610 1610 l = fp.readline()
1611 1611 try:
1612 1612 resp = int(l)
1613 1613 except ValueError:
1614 1614 raise error.ResponseError(
1615 1615 _('unexpected response from remote server:'), l)
1616 1616 if resp == 1:
1617 1617 raise util.Abort(_('operation forbidden by server'))
1618 1618 elif resp == 2:
1619 1619 raise util.Abort(_('locking the remote repository failed'))
1620 1620 elif resp != 0:
1621 1621 raise util.Abort(_('the server sent an unknown error code'))
1622 1622 self.ui.status(_('streaming all changes\n'))
1623 1623 l = fp.readline()
1624 1624 try:
1625 1625 total_files, total_bytes = map(int, l.split(' ', 1))
1626 1626 except (ValueError, TypeError):
1627 1627 raise error.ResponseError(
1628 1628 _('unexpected response from remote server:'), l)
1629 1629 self.ui.status(_('%d files to transfer, %s of data\n') %
1630 1630 (total_files, util.bytecount(total_bytes)))
1631 1631 handled_bytes = 0
1632 1632 self.ui.progress(_('clone'), 0, total=total_bytes)
1633 1633 start = time.time()
1634 1634
1635 1635 tr = self.transaction(_('clone'))
1636 1636 try:
1637 1637 for i in xrange(total_files):
1638 1638 # XXX doesn't support '\n' or '\r' in filenames
1639 1639 l = fp.readline()
1640 1640 try:
1641 1641 name, size = l.split('\0', 1)
1642 1642 size = int(size)
1643 1643 except (ValueError, TypeError):
1644 1644 raise error.ResponseError(
1645 1645 _('unexpected response from remote server:'), l)
1646 1646 if self.ui.debugflag:
1647 1647 self.ui.debug('adding %s (%s)\n' %
1648 1648 (name, util.bytecount(size)))
1649 1649 # for backwards compat, name was partially encoded
1650 1650 ofp = self.sopener(store.decodedir(name), 'w')
1651 1651 for chunk in util.filechunkiter(fp, limit=size):
1652 1652 handled_bytes += len(chunk)
1653 1653 self.ui.progress(_('clone'), handled_bytes,
1654 1654 total=total_bytes)
1655 1655 ofp.write(chunk)
1656 1656 ofp.close()
1657 1657 tr.close()
1658 1658 finally:
1659 1659 tr.release()
1660 1660
1661 1661 # Writing straight to files circumvented the inmemory caches
1662 1662 self.invalidate()
1663 1663
1664 1664 elapsed = time.time() - start
1665 1665 if elapsed <= 0:
1666 1666 elapsed = 0.001
1667 1667 self.ui.progress(_('clone'), None)
1668 1668 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1669 1669 (util.bytecount(total_bytes), elapsed,
1670 1670 util.bytecount(total_bytes / elapsed)))
1671 1671
1672 1672 # new requirements = old non-format requirements +
1673 1673 # new format-related
1674 1674 # requirements from the streamed-in repository
1675 1675 requirements.update(set(self.requirements) - self.supportedformats)
1676 1676 self._applyrequirements(requirements)
1677 1677 self._writerequirements()
1678 1678
1679 1679 if rbranchmap:
1680 1680 rbheads = []
1681 1681 for bheads in rbranchmap.itervalues():
1682 1682 rbheads.extend(bheads)
1683 1683
1684 1684 if rbheads:
1685 1685 rtiprev = max((int(self.changelog.rev(node))
1686 1686 for node in rbheads))
1687 1687 cache = branchmap.branchcache(rbranchmap,
1688 1688 self[rtiprev].node(),
1689 1689 rtiprev)
1690 1690 # Try to stick it as low as possible
1691 1691 # filter above served are unlikely to be fetch from a clone
1692 1692 for candidate in ('base', 'immutable', 'served'):
1693 1693 rview = self.filtered(candidate)
1694 1694 if cache.validfor(rview):
1695 1695 self._branchcaches[candidate] = cache
1696 1696 cache.write(rview)
1697 1697 break
1698 1698 self.invalidate()
1699 1699 return len(self.heads()) + 1
1700 1700 finally:
1701 1701 lock.release()
1702 1702
1703 1703 def clone(self, remote, heads=[], stream=False):
1704 1704 '''clone remote repository.
1705 1705
1706 1706 keyword arguments:
1707 1707 heads: list of revs to clone (forces use of pull)
1708 1708 stream: use streaming clone if possible'''
1709 1709
1710 1710 # now, all clients that can request uncompressed clones can
1711 1711 # read repo formats supported by all servers that can serve
1712 1712 # them.
1713 1713
1714 1714 # if revlog format changes, client will have to check version
1715 1715 # and format flags on "stream" capability, and use
1716 1716 # uncompressed only if compatible.
1717 1717
1718 1718 if not stream:
1719 1719 # if the server explicitly prefers to stream (for fast LANs)
1720 1720 stream = remote.capable('stream-preferred')
1721 1721
1722 1722 if stream and not heads:
1723 1723 # 'stream' means remote revlog format is revlogv1 only
1724 1724 if remote.capable('stream'):
1725 1725 return self.stream_in(remote, set(('revlogv1',)))
1726 1726 # otherwise, 'streamreqs' contains the remote revlog format
1727 1727 streamreqs = remote.capable('streamreqs')
1728 1728 if streamreqs:
1729 1729 streamreqs = set(streamreqs.split(','))
1730 1730 # if we support it, stream in and adjust our requirements
1731 1731 if not streamreqs - self.supportedformats:
1732 1732 return self.stream_in(remote, streamreqs)
1733 1733
1734 1734 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1735 1735 try:
1736 1736 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1737 1737 ret = exchange.pull(self, remote, heads).cgresult
1738 1738 finally:
1739 1739 self.ui.restoreconfig(quiet)
1740 1740 return ret
1741 1741
1742 1742 def pushkey(self, namespace, key, old, new):
1743 1743 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1744 1744 old=old, new=new)
1745 1745 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1746 1746 ret = pushkey.push(self, namespace, key, old, new)
1747 1747 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1748 1748 ret=ret)
1749 1749 return ret
1750 1750
1751 1751 def listkeys(self, namespace):
1752 1752 self.hook('prelistkeys', throw=True, namespace=namespace)
1753 1753 self.ui.debug('listing keys for "%s"\n' % namespace)
1754 1754 values = pushkey.list(self, namespace)
1755 1755 self.hook('listkeys', namespace=namespace, values=values)
1756 1756 return values
1757 1757
1758 1758 def debugwireargs(self, one, two, three=None, four=None, five=None):
1759 1759 '''used to test argument passing over the wire'''
1760 1760 return "%s %s %s %s %s" % (one, two, three, four, five)
1761 1761
1762 1762 def savecommitmessage(self, text):
1763 1763 fp = self.opener('last-message.txt', 'wb')
1764 1764 try:
1765 1765 fp.write(text)
1766 1766 finally:
1767 1767 fp.close()
1768 1768 return self.pathto(fp.name[len(self.root) + 1:])
1769 1769
1770 1770 # used to avoid circular references so destructors work
1771 1771 def aftertrans(files):
1772 1772 renamefiles = [tuple(t) for t in files]
1773 1773 def a():
1774 1774 for vfs, src, dest in renamefiles:
1775 1775 try:
1776 1776 vfs.rename(src, dest)
1777 1777 except OSError: # journal file does not yet exist
1778 1778 pass
1779 1779 return a
1780 1780
1781 1781 def undoname(fn):
1782 1782 base, name = os.path.split(fn)
1783 1783 assert name.startswith('journal')
1784 1784 return os.path.join(base, name.replace('journal', 'undo', 1))
1785 1785
1786 1786 def instance(ui, path, create):
1787 1787 return localrepository(ui, util.urllocalpath(path), create)
1788 1788
1789 1789 def islocal(path):
1790 1790 return True
@@ -1,243 +1,244 b''
1 1 # manifest.py - manifest revision class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 import mdiff, parsers, error, revlog, util, dicthelpers
10 10 import array, struct
11 11
12 12 class manifestdict(dict):
13 13 def __init__(self, mapping=None, flags=None):
14 14 if mapping is None:
15 15 mapping = {}
16 16 if flags is None:
17 17 flags = {}
18 18 dict.__init__(self, mapping)
19 19 self._flags = flags
20 20 def flags(self, f):
21 21 return self._flags.get(f, "")
22 22 def withflags(self):
23 23 return set(self._flags.keys())
24 def set(self, f, flags):
24 def setflag(self, f, flags):
25 """Set the flags (symlink, executable) for path f."""
25 26 self._flags[f] = flags
26 27 def copy(self):
27 28 return manifestdict(self, dict.copy(self._flags))
28 29 def intersectfiles(self, files):
29 30 '''make a new manifestdict with the intersection of self with files
30 31
31 32 The algorithm assumes that files is much smaller than self.'''
32 33 ret = manifestdict()
33 34 for fn in files:
34 35 if fn in self:
35 36 ret[fn] = self[fn]
36 37 flags = self._flags.get(fn, None)
37 38 if flags:
38 39 ret._flags[fn] = flags
39 40 return ret
40 41 def flagsdiff(self, d2):
41 42 return dicthelpers.diff(self._flags, d2._flags, "")
42 43
43 44 def text(self):
44 45 fl = sorted(self)
45 46 _checkforbidden(fl)
46 47
47 48 hex, flags = revlog.hex, self.flags
48 49 # if this is changed to support newlines in filenames,
49 50 # be sure to check the templates/ dir again (especially *-raw.tmpl)
50 51 return ''.join("%s\0%s%s\n" % (f, hex(self[f]), flags(f)) for f in fl)
51 52
52 53 def fastdelta(self, base, changes):
53 54 """Given a base manifest text as an array.array and a list of changes
54 55 relative to that text, compute a delta that can be used by revlog.
55 56 """
56 57 delta = []
57 58 dstart = None
58 59 dend = None
59 60 dline = [""]
60 61 start = 0
61 62 # zero copy representation of base as a buffer
62 63 addbuf = util.buffer(base)
63 64
64 65 # start with a readonly loop that finds the offset of
65 66 # each line and creates the deltas
66 67 for f, todelete in changes:
67 68 # bs will either be the index of the item or the insert point
68 69 start, end = _msearch(addbuf, f, start)
69 70 if not todelete:
70 71 l = "%s\0%s%s\n" % (f, revlog.hex(self[f]), self.flags(f))
71 72 else:
72 73 if start == end:
73 74 # item we want to delete was not found, error out
74 75 raise AssertionError(
75 76 _("failed to remove %s from manifest") % f)
76 77 l = ""
77 78 if dstart is not None and dstart <= start and dend >= start:
78 79 if dend < end:
79 80 dend = end
80 81 if l:
81 82 dline.append(l)
82 83 else:
83 84 if dstart is not None:
84 85 delta.append([dstart, dend, "".join(dline)])
85 86 dstart = start
86 87 dend = end
87 88 dline = [l]
88 89
89 90 if dstart is not None:
90 91 delta.append([dstart, dend, "".join(dline)])
91 92 # apply the delta to the base, and get a delta for addrevision
92 93 deltatext, arraytext = _addlistdelta(base, delta)
93 94 return arraytext, deltatext
94 95
95 96 def _msearch(m, s, lo=0, hi=None):
96 97 '''return a tuple (start, end) that says where to find s within m.
97 98
98 99 If the string is found m[start:end] are the line containing
99 100 that string. If start == end the string was not found and
100 101 they indicate the proper sorted insertion point.
101 102
102 103 m should be a buffer or a string
103 104 s is a string'''
104 105 def advance(i, c):
105 106 while i < lenm and m[i] != c:
106 107 i += 1
107 108 return i
108 109 if not s:
109 110 return (lo, lo)
110 111 lenm = len(m)
111 112 if not hi:
112 113 hi = lenm
113 114 while lo < hi:
114 115 mid = (lo + hi) // 2
115 116 start = mid
116 117 while start > 0 and m[start - 1] != '\n':
117 118 start -= 1
118 119 end = advance(start, '\0')
119 120 if m[start:end] < s:
120 121 # we know that after the null there are 40 bytes of sha1
121 122 # this translates to the bisect lo = mid + 1
122 123 lo = advance(end + 40, '\n') + 1
123 124 else:
124 125 # this translates to the bisect hi = mid
125 126 hi = start
126 127 end = advance(lo, '\0')
127 128 found = m[lo:end]
128 129 if s == found:
129 130 # we know that after the null there are 40 bytes of sha1
130 131 end = advance(end + 40, '\n')
131 132 return (lo, end + 1)
132 133 else:
133 134 return (lo, lo)
134 135
135 136 def _checkforbidden(l):
136 137 """Check filenames for illegal characters."""
137 138 for f in l:
138 139 if '\n' in f or '\r' in f:
139 140 raise error.RevlogError(
140 141 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
141 142
142 143
143 144 # apply the changes collected during the bisect loop to our addlist
144 145 # return a delta suitable for addrevision
145 146 def _addlistdelta(addlist, x):
146 147 # for large addlist arrays, building a new array is cheaper
147 148 # than repeatedly modifying the existing one
148 149 currentposition = 0
149 150 newaddlist = array.array('c')
150 151
151 152 for start, end, content in x:
152 153 newaddlist += addlist[currentposition:start]
153 154 if content:
154 155 newaddlist += array.array('c', content)
155 156
156 157 currentposition = end
157 158
158 159 newaddlist += addlist[currentposition:]
159 160
160 161 deltatext = "".join(struct.pack(">lll", start, end, len(content))
161 162 + content for start, end, content in x)
162 163 return deltatext, newaddlist
163 164
164 165 def _parse(lines):
165 166 mfdict = manifestdict()
166 167 parsers.parse_manifest(mfdict, mfdict._flags, lines)
167 168 return mfdict
168 169
169 170 class manifest(revlog.revlog):
170 171 def __init__(self, opener):
171 172 # we expect to deal with not more than four revs at a time,
172 173 # during a commit --amend
173 174 self._mancache = util.lrucachedict(4)
174 175 revlog.revlog.__init__(self, opener, "00manifest.i")
175 176
176 177 def readdelta(self, node):
177 178 r = self.rev(node)
178 179 return _parse(mdiff.patchtext(self.revdiff(self.deltaparent(r), r)))
179 180
180 181 def readfast(self, node):
181 182 '''use the faster of readdelta or read'''
182 183 r = self.rev(node)
183 184 deltaparent = self.deltaparent(r)
184 185 if deltaparent != revlog.nullrev and deltaparent in self.parentrevs(r):
185 186 return self.readdelta(node)
186 187 return self.read(node)
187 188
188 189 def read(self, node):
189 190 if node == revlog.nullid:
190 191 return manifestdict() # don't upset local cache
191 192 if node in self._mancache:
192 193 return self._mancache[node][0]
193 194 text = self.revision(node)
194 195 arraytext = array.array('c', text)
195 196 mapping = _parse(text)
196 197 self._mancache[node] = (mapping, arraytext)
197 198 return mapping
198 199
199 200 def find(self, node, f):
200 201 '''look up entry for a single file efficiently.
201 202 return (node, flags) pair if found, (None, None) if not.'''
202 203 if node in self._mancache:
203 204 mapping = self._mancache[node][0]
204 205 return mapping.get(f), mapping.flags(f)
205 206 text = self.revision(node)
206 207 start, end = _msearch(text, f)
207 208 if start == end:
208 209 return None, None
209 210 l = text[start:end]
210 211 f, n = l.split('\0')
211 212 return revlog.bin(n[:40]), n[40:-1]
212 213
213 214 def add(self, map, transaction, link, p1, p2, added, removed):
214 215 if p1 in self._mancache:
215 216 # If our first parent is in the manifest cache, we can
216 217 # compute a delta here using properties we know about the
217 218 # manifest up-front, which may save time later for the
218 219 # revlog layer.
219 220
220 221 _checkforbidden(added)
221 222 # combine the changed lists into one list for sorting
222 223 work = [(x, False) for x in added]
223 224 work.extend((x, True) for x in removed)
224 225 # this could use heapq.merge() (from Python 2.6+) or equivalent
225 226 # since the lists are already sorted
226 227 work.sort()
227 228
228 229 arraytext, deltatext = map.fastdelta(self._mancache[p1][1], work)
229 230 cachedelta = self.rev(p1), deltatext
230 231 text = util.buffer(arraytext)
231 232 else:
232 233 # The first parent manifest isn't already loaded, so we'll
233 234 # just encode a fulltext of the manifest and pass that
234 235 # through to the revlog layer, and let it handle the delta
235 236 # process.
236 237 text = map.text()
237 238 arraytext = array.array('c', text)
238 239 cachedelta = None
239 240
240 241 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
241 242 self._mancache[n] = (map, arraytext)
242 243
243 244 return n
General Comments 0
You need to be logged in to leave comments. Login now