repo: move unfiltered-repo optimization to workingctx...
Martin von Zweigbergk
r39995:43d3b09b default
@@ -1,2437 +1,2439 @@
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 hex,
19 19 modifiednodeid,
20 20 nullid,
21 21 nullrev,
22 22 short,
23 23 wdirfilenodeids,
24 24 wdirid,
25 25 )
26 26 from . import (
27 27 dagop,
28 28 encoding,
29 29 error,
30 30 fileset,
31 31 match as matchmod,
32 32 obsolete as obsmod,
33 33 patch,
34 34 pathutil,
35 35 phases,
36 36 pycompat,
37 37 repoview,
38 38 scmutil,
39 39 sparse,
40 40 subrepo,
41 41 subrepoutil,
42 42 util,
43 43 )
44 44 from .utils import (
45 45 dateutil,
46 46 stringutil,
47 47 )
48 48
49 49 propertycache = util.propertycache
50 50
51 51 class basectx(object):
52 52 """A basectx object represents the common logic for its children:
53 53 changectx: read-only context that is already present in the repo,
54 54 workingctx: a context that represents the working directory and can
55 55 be committed,
56 56 memctx: a context that represents changes in-memory and can also
57 57 be committed."""
58 58
59 59 def __init__(self, repo):
60 60 self._repo = repo
61 61
62 62 def __bytes__(self):
63 63 return short(self.node())
64 64
65 65 __str__ = encoding.strmethod(__bytes__)
66 66
67 67 def __repr__(self):
68 68 return r"<%s %s>" % (type(self).__name__, str(self))
69 69
70 70 def __eq__(self, other):
71 71 try:
72 72 return type(self) == type(other) and self._rev == other._rev
73 73 except AttributeError:
74 74 return False
75 75
76 76 def __ne__(self, other):
77 77 return not (self == other)
78 78
79 79 def __contains__(self, key):
80 80 return key in self._manifest
81 81
82 82 def __getitem__(self, key):
83 83 return self.filectx(key)
84 84
85 85 def __iter__(self):
86 86 return iter(self._manifest)
87 87
88 88 def _buildstatusmanifest(self, status):
89 89 """Builds a manifest that includes the given status results, if this is
90 90 a working copy context. For non-working copy contexts, it just returns
91 91 the normal manifest."""
92 92 return self.manifest()
93 93
94 94 def _matchstatus(self, other, match):
95 95 """This internal method provides a way for child objects to override the
96 96 match operator.
97 97 """
98 98 return match
99 99
100 100 def _buildstatus(self, other, s, match, listignored, listclean,
101 101 listunknown):
102 102 """build a status with respect to another context"""
103 103 # Load earliest manifest first for caching reasons. More specifically,
104 104 # if you have revisions 1000 and 1001, 1001 is probably stored as a
105 105 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
106 106 # 1000 and cache it so that when you read 1001, we just need to apply a
107 107 # delta to what's in the cache. So that's one full reconstruction + one
108 108 # delta application.
109 109 mf2 = None
110 110 if self.rev() is not None and self.rev() < other.rev():
111 111 mf2 = self._buildstatusmanifest(s)
112 112 mf1 = other._buildstatusmanifest(s)
113 113 if mf2 is None:
114 114 mf2 = self._buildstatusmanifest(s)
115 115
116 116 modified, added = [], []
117 117 removed = []
118 118 clean = []
119 119 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
120 120 deletedset = set(deleted)
121 121 d = mf1.diff(mf2, match=match, clean=listclean)
122 122 for fn, value in d.iteritems():
123 123 if fn in deletedset:
124 124 continue
125 125 if value is None:
126 126 clean.append(fn)
127 127 continue
128 128 (node1, flag1), (node2, flag2) = value
129 129 if node1 is None:
130 130 added.append(fn)
131 131 elif node2 is None:
132 132 removed.append(fn)
133 133 elif flag1 != flag2:
134 134 modified.append(fn)
135 135 elif node2 not in wdirfilenodeids:
136 136 # When comparing files between two commits, we save time by
137 137 # not comparing the file contents when the nodeids differ.
138 138 # Note that this means we incorrectly report a reverted change
139 139 # to a file as a modification.
140 140 modified.append(fn)
141 141 elif self[fn].cmp(other[fn]):
142 142 modified.append(fn)
143 143 else:
144 144 clean.append(fn)
145 145
146 146 if removed:
147 147 # need to filter files if they are already reported as removed
148 148 unknown = [fn for fn in unknown if fn not in mf1 and
149 149 (not match or match(fn))]
150 150 ignored = [fn for fn in ignored if fn not in mf1 and
151 151 (not match or match(fn))]
152 152 # if they're deleted, don't report them as removed
153 153 removed = [fn for fn in removed if fn not in deletedset]
154 154
155 155 return scmutil.status(modified, added, removed, deleted, unknown,
156 156 ignored, clean)
157 157
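# Editorial sketch of the manifest diff consumed by _buildstatus() above
# (path names are assumptions, not part of the original module). Each
# differing path maps to a pair of (node, flags) tuples; a None node on
# one side marks an addition or removal, and a None value marks a clean
# file when listclean is requested:
#
#   d = mf1.diff(mf2, match=match, clean=listclean)
#   d[b'modified.txt'] == ((node1, b''), (node2, b''))
#   d[b'added.txt'] == ((None, b''), (node2, b''))
#   d[b'removed.txt'] == ((node1, b''), (None, b''))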
158 158 @propertycache
159 159 def substate(self):
160 160 return subrepoutil.state(self, self._repo.ui)
161 161
162 162 def subrev(self, subpath):
163 163 return self.substate[subpath][1]
164 164
165 165 def rev(self):
166 166 return self._rev
167 167 def node(self):
168 168 return self._node
169 169 def hex(self):
170 170 return hex(self.node())
171 171 def manifest(self):
172 172 return self._manifest
173 173 def manifestctx(self):
174 174 return self._manifestctx
175 175 def repo(self):
176 176 return self._repo
177 177 def phasestr(self):
178 178 return phases.phasenames[self.phase()]
179 179 def mutable(self):
180 180 return self.phase() > phases.public
181 181
182 182 def matchfileset(self, expr, badfn=None):
183 183 return fileset.match(self, expr, badfn=badfn)
184 184
185 185 def obsolete(self):
186 186 """True if the changeset is obsolete"""
187 187 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
188 188
189 189 def extinct(self):
190 190 """True if the changeset is extinct"""
191 191 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
192 192
193 193 def orphan(self):
194 194 """True if the changeset is not obsolete, but its ancestor is"""
195 195 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
196 196
197 197 def phasedivergent(self):
198 198 """True if the changeset tries to be a successor of a public changeset
199 199
200 200 Only non-public and non-obsolete changesets may be phase-divergent.
201 201 """
202 202 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
203 203
204 204 def contentdivergent(self):
205 205 """Is a successor of a changeset with multiple possible successor sets
206 206
207 207 Only non-public and non-obsolete changesets may be content-divergent.
208 208 """
209 209 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
210 210
211 211 def isunstable(self):
212 212 """True if the changeset is either orphan, phase-divergent or
213 213 content-divergent"""
214 214 return self.orphan() or self.phasedivergent() or self.contentdivergent()
215 215
216 216 def instabilities(self):
217 217 """return the list of instabilities affecting this changeset.
218 218
219 219 Instabilities are returned as strings. possible values are:
220 220 - orphan,
221 221 - phase-divergent,
222 222 - content-divergent.
223 223 """
224 224 instabilities = []
225 225 if self.orphan():
226 226 instabilities.append('orphan')
227 227 if self.phasedivergent():
228 228 instabilities.append('phase-divergent')
229 229 if self.contentdivergent():
230 230 instabilities.append('content-divergent')
231 231 return instabilities
232 232
233 233 def parents(self):
234 234 """return contexts for each parent changeset"""
235 235 return self._parents
236 236
237 237 def p1(self):
238 238 return self._parents[0]
239 239
240 240 def p2(self):
241 241 parents = self._parents
242 242 if len(parents) == 2:
243 243 return parents[1]
244 244 return self._repo[nullrev]
245 245
246 246 def _fileinfo(self, path):
247 247 if r'_manifest' in self.__dict__:
248 248 try:
249 249 return self._manifest[path], self._manifest.flags(path)
250 250 except KeyError:
251 251 raise error.ManifestLookupError(self._node, path,
252 252 _('not found in manifest'))
253 253 if r'_manifestdelta' in self.__dict__ or path in self.files():
254 254 if path in self._manifestdelta:
255 255 return (self._manifestdelta[path],
256 256 self._manifestdelta.flags(path))
257 257 mfl = self._repo.manifestlog
258 258 try:
259 259 node, flag = mfl[self._changeset.manifest].find(path)
260 260 except KeyError:
261 261 raise error.ManifestLookupError(self._node, path,
262 262 _('not found in manifest'))
263 263
264 264 return node, flag
265 265
266 266 def filenode(self, path):
267 267 return self._fileinfo(path)[0]
268 268
269 269 def flags(self, path):
270 270 try:
271 271 return self._fileinfo(path)[1]
272 272 except error.LookupError:
273 273 return ''
274 274
275 275 def sub(self, path, allowcreate=True):
276 276 '''return a subrepo for the stored revision of path, never wdir()'''
277 277 return subrepo.subrepo(self, path, allowcreate=allowcreate)
278 278
279 279 def nullsub(self, path, pctx):
280 280 return subrepo.nullsubrepo(self, path, pctx)
281 281
282 282 def workingsub(self, path):
283 283 '''return a subrepo for the stored revision, or wdir if this is a wdir
284 284 context.
285 285 '''
286 286 return subrepo.subrepo(self, path, allowwdir=True)
287 287
288 288 def match(self, pats=None, include=None, exclude=None, default='glob',
289 289 listsubrepos=False, badfn=None):
290 290 r = self._repo
291 291 return matchmod.match(r.root, r.getcwd(), pats,
292 292 include, exclude, default,
293 293 auditor=r.nofsauditor, ctx=self,
294 294 listsubrepos=listsubrepos, badfn=badfn)
295 295
296 296 def diff(self, ctx2=None, match=None, changes=None, opts=None,
297 297 losedatafn=None, prefix='', relroot='', copy=None,
298 298 hunksfilterfn=None):
299 299 """Returns a diff generator for the given contexts and matcher"""
300 300 if ctx2 is None:
301 301 ctx2 = self.p1()
302 302 if ctx2 is not None:
303 303 ctx2 = self._repo[ctx2]
304 304 return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
305 305 opts=opts, losedatafn=losedatafn, prefix=prefix,
306 306 relroot=relroot, copy=copy,
307 307 hunksfilterfn=hunksfilterfn)
308 308
309 309 def dirs(self):
310 310 return self._manifest.dirs()
311 311
312 312 def hasdir(self, dir):
313 313 return self._manifest.hasdir(dir)
314 314
315 315 def status(self, other=None, match=None, listignored=False,
316 316 listclean=False, listunknown=False, listsubrepos=False):
317 317 """return status of files between two nodes or node and working
318 318 directory.
319 319
320 320 If other is None, compare this node with working directory.
321 321
322 322 returns (modified, added, removed, deleted, unknown, ignored, clean)
323 323 """
324 324
325 325 ctx1 = self
326 326 ctx2 = self._repo[other]
327 327
328 328 # This next code block is, admittedly, fragile logic that tests for
329 329 # reversing the contexts and wouldn't need to exist if it weren't for
330 330 # the fast (and common) code path of comparing the working directory
331 331 # with its first parent.
332 332 #
333 333 # What we're aiming for here is the ability to call:
334 334 #
335 335 # workingctx.status(parentctx)
336 336 #
337 337 # If we always built the manifest for each context and compared those,
338 338 # then we'd be done. But the special case of the above call means we
339 339 # just copy the manifest of the parent.
340 340 reversed = False
341 341 if (not isinstance(ctx1, changectx)
342 342 and isinstance(ctx2, changectx)):
343 343 reversed = True
344 344 ctx1, ctx2 = ctx2, ctx1
345 345
346 346 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
347 347 match = ctx2._matchstatus(ctx1, match)
348 348 r = scmutil.status([], [], [], [], [], [], [])
349 349 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
350 350 listunknown)
351 351
352 352 if reversed:
353 353 # Reverse added and removed. Clear deleted, unknown and ignored as
354 354 # these make no sense to reverse.
355 355 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
356 356 r.clean)
357 357
358 358 if listsubrepos:
359 359 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
360 360 try:
361 361 rev2 = ctx2.subrev(subpath)
362 362 except KeyError:
363 363 # A subrepo that existed in node1 was deleted between
364 364 # node1 and node2 (inclusive). Thus, ctx2's substate
365 365 # won't contain that subpath. The best we can do is ignore it.
366 366 rev2 = None
367 367 submatch = matchmod.subdirmatcher(subpath, match)
368 368 s = sub.status(rev2, match=submatch, ignored=listignored,
369 369 clean=listclean, unknown=listunknown,
370 370 listsubrepos=True)
371 371 for rfiles, sfiles in zip(r, s):
372 372 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
373 373
374 374 narrowmatch = self._repo.narrowmatch()
375 375 if not narrowmatch.always():
376 376 for l in r:
377 377 l[:] = list(filter(narrowmatch, l))
378 378 for l in r:
379 379 l.sort()
380 380
381 381 return r
382 382
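# A minimal usage sketch for status() above (the repository path is an
# assumption):
#
#   from mercurial import hg, ui as uimod
#   repo = hg.repository(uimod.ui.load(), b'/path/to/repo')
#   tip = repo[b'tip']
#   st = tip.status(tip.p1().rev())   # or tip.status() to use the wdir
#   # st.modified, st.added, st.removed, ... are sorted lists of paths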
383 383 class changectx(basectx):
384 384 """A changecontext object makes access to data related to a particular
385 385 changeset convenient. It represents a read-only context already present in
386 386 the repo."""
387 387 def __init__(self, repo, rev, node):
388 388 """changeid is a revision number, node, or tag"""
389 389 super(changectx, self).__init__(repo)
390 390 self._rev = rev
391 391 self._node = node
392 392
393 393 def __hash__(self):
394 394 try:
395 395 return hash(self._rev)
396 396 except AttributeError:
397 397 return id(self)
398 398
399 399 def __nonzero__(self):
400 400 return self._rev != nullrev
401 401
402 402 __bool__ = __nonzero__
403 403
404 404 @propertycache
405 405 def _changeset(self):
406 406 return self._repo.changelog.changelogrevision(self.rev())
407 407
408 408 @propertycache
409 409 def _manifest(self):
410 410 return self._manifestctx.read()
411 411
412 412 @property
413 413 def _manifestctx(self):
414 414 return self._repo.manifestlog[self._changeset.manifest]
415 415
416 416 @propertycache
417 417 def _manifestdelta(self):
418 418 return self._manifestctx.readdelta()
419 419
420 420 @propertycache
421 421 def _parents(self):
422 422 repo = self._repo
423 423 p1, p2 = repo.changelog.parentrevs(self._rev)
424 424 if p2 == nullrev:
425 425 return [repo[p1]]
426 426 return [repo[p1], repo[p2]]
427 427
428 428 def changeset(self):
429 429 c = self._changeset
430 430 return (
431 431 c.manifest,
432 432 c.user,
433 433 c.date,
434 434 c.files,
435 435 c.description,
436 436 c.extra,
437 437 )
438 438 def manifestnode(self):
439 439 return self._changeset.manifest
440 440
441 441 def user(self):
442 442 return self._changeset.user
443 443 def date(self):
444 444 return self._changeset.date
445 445 def files(self):
446 446 return self._changeset.files
447 447 def description(self):
448 448 return self._changeset.description
449 449 def branch(self):
450 450 return encoding.tolocal(self._changeset.extra.get("branch"))
451 451 def closesbranch(self):
452 452 return 'close' in self._changeset.extra
453 453 def extra(self):
454 454 """Return a dict of extra information."""
455 455 return self._changeset.extra
456 456 def tags(self):
457 457 """Return a list of byte tag names"""
458 458 return self._repo.nodetags(self._node)
459 459 def bookmarks(self):
460 460 """Return a list of byte bookmark names."""
461 461 return self._repo.nodebookmarks(self._node)
462 462 def phase(self):
463 463 return self._repo._phasecache.phase(self._repo, self._rev)
464 464 def hidden(self):
465 465 return self._rev in repoview.filterrevs(self._repo, 'visible')
466 466
467 467 def isinmemory(self):
468 468 return False
469 469
470 470 def children(self):
471 471 """return list of changectx contexts for each child changeset.
472 472
473 473 This returns only the immediate child changesets. Use descendants() to
474 474 recursively walk children.
475 475 """
476 476 c = self._repo.changelog.children(self._node)
477 477 return [self._repo[x] for x in c]
478 478
479 479 def ancestors(self):
480 480 for a in self._repo.changelog.ancestors([self._rev]):
481 481 yield self._repo[a]
482 482
483 483 def descendants(self):
484 484 """Recursively yield all children of the changeset.
485 485
486 486 For just the immediate children, use children()
487 487 """
488 488 for d in self._repo.changelog.descendants([self._rev]):
489 489 yield self._repo[d]
490 490
491 491 def filectx(self, path, fileid=None, filelog=None):
492 492 """get a file context from this changeset"""
493 493 if fileid is None:
494 494 fileid = self.filenode(path)
495 495 return filectx(self._repo, path, fileid=fileid,
496 496 changectx=self, filelog=filelog)
497 497
498 498 def ancestor(self, c2, warn=False):
499 499 """return the "best" ancestor context of self and c2
500 500
501 501 If there are multiple candidates, it will show a message and check
502 502 merge.preferancestor configuration before falling back to the
503 503 revlog ancestor."""
504 504 # deal with workingctxs
505 505 n2 = c2._node
506 506 if n2 is None:
507 507 n2 = c2._parents[0]._node
508 508 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
509 509 if not cahs:
510 510 anc = nullid
511 511 elif len(cahs) == 1:
512 512 anc = cahs[0]
513 513 else:
514 514 # experimental config: merge.preferancestor
515 515 for r in self._repo.ui.configlist('merge', 'preferancestor'):
516 516 try:
517 517 ctx = scmutil.revsymbol(self._repo, r)
518 518 except error.RepoLookupError:
519 519 continue
520 520 anc = ctx.node()
521 521 if anc in cahs:
522 522 break
523 523 else:
524 524 anc = self._repo.changelog.ancestor(self._node, n2)
525 525 if warn:
526 526 self._repo.ui.status(
527 527 (_("note: using %s as ancestor of %s and %s\n") %
528 528 (short(anc), short(self._node), short(n2))) +
529 529 ''.join(_(" alternatively, use --config "
530 530 "merge.preferancestor=%s\n") %
531 531 short(n) for n in sorted(cahs) if n != anc))
532 532 return self._repo[anc]
533 533
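# Illustrative note for ancestor() above (the config value and branch
# names are assumptions): with several common ancestor heads, the
# experimental merge.preferancestor option picks one explicitly, e.g.
#
#   [merge]
#   preferancestor = 1a2b3c4d5e6f
#
#   anc = repo[b'default'].ancestor(repo[b'stable'], warn=True)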
534 534 def isancestorof(self, other):
535 535 """True if this changeset is an ancestor of other"""
536 536 return self._repo.changelog.isancestorrev(self._rev, other._rev)
537 537
538 538 def walk(self, match):
539 539 '''Generates matching file names.'''
540 540
541 541 # Wrap match.bad method to have message with nodeid
542 542 def bad(fn, msg):
543 543 # The manifest doesn't know about subrepos, so don't complain about
544 544 # paths into valid subrepos.
545 545 if any(fn == s or fn.startswith(s + '/')
546 546 for s in self.substate):
547 547 return
548 548 match.bad(fn, _('no such file in rev %s') % self)
549 549
550 550 m = matchmod.badmatch(match, bad)
551 551 return self._manifest.walk(m)
552 552
553 553 def matches(self, match):
554 554 return self.walk(match)
555 555
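# A hedged usage sketch for walk() above (the pattern is an assumption):
#
#   ctx = repo[b'tip']
#   m = ctx.match([b'glob:**.py'])
#   for fn in ctx.walk(m):
#       pass  # file names from the manifest matching the pattern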
556 556 class basefilectx(object):
557 557 """A filecontext object represents the common logic for its children:
558 558 filectx: read-only access to a filerevision that is already present
559 559 in the repo,
560 560 workingfilectx: a filecontext that represents files from the working
561 561 directory,
562 562 memfilectx: a filecontext that represents files in-memory,
563 563 """
564 564 @propertycache
565 565 def _filelog(self):
566 566 return self._repo.file(self._path)
567 567
568 568 @propertycache
569 569 def _changeid(self):
570 570 if r'_changeid' in self.__dict__:
571 571 return self._changeid
572 572 elif r'_changectx' in self.__dict__:
573 573 return self._changectx.rev()
574 574 elif r'_descendantrev' in self.__dict__:
575 575 # this file context was created from a revision with a known
576 576 # descendant, we can (lazily) correct for linkrev aliases
577 577 return self._adjustlinkrev(self._descendantrev)
578 578 else:
579 579 return self._filelog.linkrev(self._filerev)
580 580
581 581 @propertycache
582 582 def _filenode(self):
583 583 if r'_fileid' in self.__dict__:
584 584 return self._filelog.lookup(self._fileid)
585 585 else:
586 586 return self._changectx.filenode(self._path)
587 587
588 588 @propertycache
589 589 def _filerev(self):
590 590 return self._filelog.rev(self._filenode)
591 591
592 592 @propertycache
593 593 def _repopath(self):
594 594 return self._path
595 595
596 596 def __nonzero__(self):
597 597 try:
598 598 self._filenode
599 599 return True
600 600 except error.LookupError:
601 601 # file is missing
602 602 return False
603 603
604 604 __bool__ = __nonzero__
605 605
606 606 def __bytes__(self):
607 607 try:
608 608 return "%s@%s" % (self.path(), self._changectx)
609 609 except error.LookupError:
610 610 return "%s@???" % self.path()
611 611
612 612 __str__ = encoding.strmethod(__bytes__)
613 613
614 614 def __repr__(self):
615 615 return r"<%s %s>" % (type(self).__name__, str(self))
616 616
617 617 def __hash__(self):
618 618 try:
619 619 return hash((self._path, self._filenode))
620 620 except AttributeError:
621 621 return id(self)
622 622
623 623 def __eq__(self, other):
624 624 try:
625 625 return (type(self) == type(other) and self._path == other._path
626 626 and self._filenode == other._filenode)
627 627 except AttributeError:
628 628 return False
629 629
630 630 def __ne__(self, other):
631 631 return not (self == other)
632 632
633 633 def filerev(self):
634 634 return self._filerev
635 635 def filenode(self):
636 636 return self._filenode
637 637 @propertycache
638 638 def _flags(self):
639 639 return self._changectx.flags(self._path)
640 640 def flags(self):
641 641 return self._flags
642 642 def filelog(self):
643 643 return self._filelog
644 644 def rev(self):
645 645 return self._changeid
646 646 def linkrev(self):
647 647 return self._filelog.linkrev(self._filerev)
648 648 def node(self):
649 649 return self._changectx.node()
650 650 def hex(self):
651 651 return self._changectx.hex()
652 652 def user(self):
653 653 return self._changectx.user()
654 654 def date(self):
655 655 return self._changectx.date()
656 656 def files(self):
657 657 return self._changectx.files()
658 658 def description(self):
659 659 return self._changectx.description()
660 660 def branch(self):
661 661 return self._changectx.branch()
662 662 def extra(self):
663 663 return self._changectx.extra()
664 664 def phase(self):
665 665 return self._changectx.phase()
666 666 def phasestr(self):
667 667 return self._changectx.phasestr()
668 668 def obsolete(self):
669 669 return self._changectx.obsolete()
670 670 def instabilities(self):
671 671 return self._changectx.instabilities()
672 672 def manifest(self):
673 673 return self._changectx.manifest()
674 674 def changectx(self):
675 675 return self._changectx
676 676 def renamed(self):
677 677 return self._copied
678 678 def repo(self):
679 679 return self._repo
680 680 def size(self):
681 681 return len(self.data())
682 682
683 683 def path(self):
684 684 return self._path
685 685
686 686 def isbinary(self):
687 687 try:
688 688 return stringutil.binary(self.data())
689 689 except IOError:
690 690 return False
691 691 def isexec(self):
692 692 return 'x' in self.flags()
693 693 def islink(self):
694 694 return 'l' in self.flags()
695 695
696 696 def isabsent(self):
697 697 """whether this filectx represents a file not in self._changectx
698 698
699 699 This is mainly for merge code to detect change/delete conflicts. This is
700 700 expected to be True for all subclasses of basectx."""
701 701 return False
702 702
703 703 _customcmp = False
704 704 def cmp(self, fctx):
705 705 """compare with other file context
706 706
707 707 returns True if different than fctx.
708 708 """
709 709 if fctx._customcmp:
710 710 return fctx.cmp(self)
711 711
712 712 if (fctx._filenode is None
713 713 and (self._repo._encodefilterpats
714 714 # if file data starts with '\1\n', empty metadata block is
715 715 # prepended, which adds 4 bytes to filelog.size().
716 716 or self.size() - 4 == fctx.size())
717 717 or self.size() == fctx.size()):
718 718 return self._filelog.cmp(self._filenode, fctx.data())
719 719
720 720 return True
721 721
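# Note on the fast path in cmp() above: when fctx is a working-copy file
# (fctx._filenode is None), a stored file whose data begins with '\1\n'
# carries a 4-byte empty metadata block in the filelog, so the sizes are
# also compared with that offset before falling back to a full content
# comparison via filelog.cmp().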
722 722 def _adjustlinkrev(self, srcrev, inclusive=False):
723 723 """return the first ancestor of <srcrev> introducing <fnode>
724 724
725 725 If the linkrev of the file revision does not point to an ancestor of
726 726 srcrev, we'll walk down the ancestors until we find one introducing
727 727 this file revision.
728 728
729 729 :srcrev: the changeset revision we search ancestors from
730 730 :inclusive: if true, the src revision will also be checked
731 731 """
732 732 repo = self._repo
733 733 cl = repo.unfiltered().changelog
734 734 mfl = repo.manifestlog
735 735 # fetch the linkrev
736 736 lkr = self.linkrev()
737 737 # hack to reuse ancestor computation when searching for renames
738 738 memberanc = getattr(self, '_ancestrycontext', None)
739 739 iteranc = None
740 740 if srcrev is None:
741 741 # wctx case, used by workingfilectx during mergecopy
742 742 revs = [p.rev() for p in self._repo[None].parents()]
743 743 inclusive = True # we skipped the real (revless) source
744 744 else:
745 745 revs = [srcrev]
746 746 if memberanc is None:
747 747 memberanc = iteranc = cl.ancestors(revs, lkr,
748 748 inclusive=inclusive)
749 749 # check if this linkrev is an ancestor of srcrev
750 750 if lkr not in memberanc:
751 751 if iteranc is None:
752 752 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
753 753 fnode = self._filenode
754 754 path = self._path
755 755 for a in iteranc:
756 756 ac = cl.read(a) # get changeset data (we avoid object creation)
757 757 if path in ac[3]: # checking the 'files' field.
758 758 # The file has been touched, check if the content is
759 759 # similar to the one we search for.
760 760 if fnode == mfl[ac[0]].readfast().get(path):
761 761 return a
762 762 # In theory, we should never get out of that loop without a result.
763 763 # But if the manifest uses a buggy file revision (not a child of
764 764 # the one it replaces), we could. Such a buggy situation will
765 765 # likely result in a crash somewhere else at some point.
766 766 return lkr
767 767
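# Editorial sketch of the linkrev aliasing handled above: a file revision
# reused by several changesets (e.g. after a revert) stores only one
# linkrev, which may point outside the ancestors of the changeset being
# inspected. _adjustlinkrev() then walks the ancestors of srcrev looking
# for a changeset that touches the path with the same file node:
#
#   fctx._descendantrev = srcrev          # set by _parentfilectx()
#   rev = fctx._adjustlinkrev(srcrev)     # first introducing ancestor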
768 768 def introrev(self):
769 769 """return the rev of the changeset which introduced this file revision
770 770
771 771 This method is different from linkrev because it takes into account the
772 772 changeset the filectx was created from. It ensures the returned
773 773 revision is one of its ancestors. This prevents bugs from
774 774 'linkrev-shadowing' when a file revision is used by multiple
775 775 changesets.
776 776 """
777 777 lkr = self.linkrev()
778 778 attrs = vars(self)
779 779 noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
780 780 if noctx or self.rev() == lkr:
781 781 return self.linkrev()
782 782 return self._adjustlinkrev(self.rev(), inclusive=True)
783 783
784 784 def introfilectx(self):
785 785 """Return filectx having identical contents, but pointing to the
786 786 changeset revision where this filectx was introduced"""
787 787 introrev = self.introrev()
788 788 if self.rev() == introrev:
789 789 return self
790 790 return self.filectx(self.filenode(), changeid=introrev)
791 791
792 792 def _parentfilectx(self, path, fileid, filelog):
793 793 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
794 794 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
795 795 if r'_changeid' in vars(self) or r'_changectx' in vars(self):
796 796 # If self is associated with a changeset (probably explicitly
797 797 # fed), ensure the created filectx is associated with a
798 798 # changeset that is an ancestor of self.changectx.
799 799 # This lets us later use _adjustlinkrev to get a correct link.
800 800 fctx._descendantrev = self.rev()
801 801 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
802 802 elif r'_descendantrev' in vars(self):
803 803 # Otherwise propagate _descendantrev if we have one associated.
804 804 fctx._descendantrev = self._descendantrev
805 805 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
806 806 return fctx
807 807
808 808 def parents(self):
809 809 _path = self._path
810 810 fl = self._filelog
811 811 parents = self._filelog.parents(self._filenode)
812 812 pl = [(_path, node, fl) for node in parents if node != nullid]
813 813
814 814 r = fl.renamed(self._filenode)
815 815 if r:
816 816 # - In the simple rename case, both parents are nullid and pl is empty.
817 817 # - In case of merge, only one of the parents is nullid and should
818 818 # be replaced with the rename information. This parent is -always-
819 819 # the first one.
820 820 #
821 821 # As nullid parents have always been filtered out in the previous
822 822 # list comprehension, inserting at index 0 will always result in
823 823 # replacing the first nullid parent with the rename information.
824 824 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
825 825
826 826 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
827 827
828 828 def p1(self):
829 829 return self.parents()[0]
830 830
831 831 def p2(self):
832 832 p = self.parents()
833 833 if len(p) == 2:
834 834 return p[1]
835 835 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
836 836
837 837 def annotate(self, follow=False, skiprevs=None, diffopts=None):
838 838 """Returns a list of annotateline objects for each line in the file
839 839
840 840 - line.fctx is the filectx of the node where that line was last changed
841 841 - line.lineno is the line number at the first appearance in the managed
842 842 file
843 843 - line.text is the data on that line (including newline character)
844 844 """
845 845 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
846 846
847 847 def parents(f):
848 848 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
849 849 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
850 850 # from the topmost introrev (= srcrev) down to p.linkrev() if it
851 851 # isn't an ancestor of the srcrev.
852 852 f._changeid
853 853 pl = f.parents()
854 854
855 855 # Don't return renamed parents if we aren't following.
856 856 if not follow:
857 857 pl = [p for p in pl if p.path() == f.path()]
858 858
859 859 # renamed filectx won't have a filelog yet, so set it
860 860 # from the cache to save time
861 861 for p in pl:
862 862 if not r'_filelog' in p.__dict__:
863 863 p._filelog = getlog(p.path())
864 864
865 865 return pl
866 866
867 867 # use linkrev to find the first changeset where self appeared
868 868 base = self.introfilectx()
869 869 if getattr(base, '_ancestrycontext', None) is None:
870 870 cl = self._repo.changelog
871 871 if base.rev() is None:
872 872 # wctx is not inclusive, but works because _ancestrycontext
873 873 # is used to test filelog revisions
874 874 ac = cl.ancestors([p.rev() for p in base.parents()],
875 875 inclusive=True)
876 876 else:
877 877 ac = cl.ancestors([base.rev()], inclusive=True)
878 878 base._ancestrycontext = ac
879 879
880 880 return dagop.annotate(base, parents, skiprevs=skiprevs,
881 881 diffopts=diffopts)
882 882
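# A minimal usage sketch for annotate() above (the file name is an
# assumption):
#
#   fctx = repo[b'tip'][b'README']
#   for line in fctx.annotate(follow=True):
#       pass  # inspect line.fctx, line.lineno, line.text per the docstring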
883 883 def ancestors(self, followfirst=False):
884 884 visit = {}
885 885 c = self
886 886 if followfirst:
887 887 cut = 1
888 888 else:
889 889 cut = None
890 890
891 891 while True:
892 892 for parent in c.parents()[:cut]:
893 893 visit[(parent.linkrev(), parent.filenode())] = parent
894 894 if not visit:
895 895 break
896 896 c = visit.pop(max(visit))
897 897 yield c
898 898
899 899 def decodeddata(self):
900 900 """Returns `data()` after running repository decoding filters.
901 901
902 902 This is often equivalent to how the data would be expressed on disk.
903 903 """
904 904 return self._repo.wwritedata(self.path(), self.data())
905 905
906 906 class filectx(basefilectx):
907 907 """A filecontext object makes access to data related to a particular
908 908 filerevision convenient."""
909 909 def __init__(self, repo, path, changeid=None, fileid=None,
910 910 filelog=None, changectx=None):
911 911 """changeid can be a changeset revision, node, or tag.
912 912 fileid can be a file revision or node."""
913 913 self._repo = repo
914 914 self._path = path
915 915
916 916 assert (changeid is not None
917 917 or fileid is not None
918 918 or changectx is not None), \
919 919 ("bad args: changeid=%r, fileid=%r, changectx=%r"
920 920 % (changeid, fileid, changectx))
921 921
922 922 if filelog is not None:
923 923 self._filelog = filelog
924 924
925 925 if changeid is not None:
926 926 self._changeid = changeid
927 927 if changectx is not None:
928 928 self._changectx = changectx
929 929 if fileid is not None:
930 930 self._fileid = fileid
931 931
932 932 @propertycache
933 933 def _changectx(self):
934 934 try:
935 935 return self._repo[self._changeid]
936 936 except error.FilteredRepoLookupError:
937 937 # Linkrev may point to any revision in the repository. When the
938 938 # repository is filtered this may lead to `filectx` trying to build
939 939 # `changectx` for a filtered revision. In such a case we fall back
940 940 # to creating `changectx` on the unfiltered version of the repository.
941 941 # This fallback should not be an issue because `changectx` from
942 942 # `filectx` are not used in complex operations that care about
943 943 # filtering.
944 944 #
945 945 # This fallback is a cheap and dirty fix that prevents several
946 946 # crashes. It does not ensure the behavior is correct. However the
947 947 # behavior was not correct before filtering either, and "incorrect
948 948 # behavior" is seen as better than "crash".
949 949 #
950 950 # Linkrevs have several serious troubles with filtering that are
951 951 # complicated to solve. Proper handling of the issue here should be
952 952 # considered when solutions to the linkrev issue are on the table.
953 953 return self._repo.unfiltered()[self._changeid]
954 954
955 955 def filectx(self, fileid, changeid=None):
956 956 '''opens an arbitrary revision of the file without
957 957 opening a new filelog'''
958 958 return filectx(self._repo, self._path, fileid=fileid,
959 959 filelog=self._filelog, changeid=changeid)
960 960
961 961 def rawdata(self):
962 962 return self._filelog.revision(self._filenode, raw=True)
963 963
964 964 def rawflags(self):
965 965 """low-level revlog flags"""
966 966 return self._filelog.flags(self._filerev)
967 967
968 968 def data(self):
969 969 try:
970 970 return self._filelog.read(self._filenode)
971 971 except error.CensoredNodeError:
972 972 if self._repo.ui.config("censor", "policy") == "ignore":
973 973 return ""
974 974 raise error.Abort(_("censored node: %s") % short(self._filenode),
975 975 hint=_("set censor.policy to ignore errors"))
976 976
977 977 def size(self):
978 978 return self._filelog.size(self._filerev)
979 979
980 980 @propertycache
981 981 def _copied(self):
982 982 """check if file was actually renamed in this changeset revision
983 983
984 984 If rename logged in file revision, we report copy for changeset only
985 985 if file revisions linkrev points back to the changeset in question
986 986 or both changeset parents contain different file revisions.
987 987 """
988 988
989 989 renamed = self._filelog.renamed(self._filenode)
990 990 if not renamed:
991 991 return None
992 992
993 993 if self.rev() == self.linkrev():
994 994 return renamed
995 995
996 996 name = self.path()
997 997 fnode = self._filenode
998 998 for p in self._changectx.parents():
999 999 try:
1000 1000 if fnode == p.filenode(name):
1001 1001 return None
1002 1002 except error.LookupError:
1003 1003 pass
1004 1004 return renamed
1005 1005
1006 1006 def children(self):
1007 1007 # hard for renames
1008 1008 c = self._filelog.children(self._filenode)
1009 1009 return [filectx(self._repo, self._path, fileid=x,
1010 1010 filelog=self._filelog) for x in c]
1011 1011
1012 1012 class committablectx(basectx):
1013 1013 """A committablectx object provides common functionality for a context that
1014 1014 wants the ability to commit, e.g. workingctx or memctx."""
1015 1015 def __init__(self, repo, text="", user=None, date=None, extra=None,
1016 1016 changes=None):
1017 1017 super(committablectx, self).__init__(repo)
1018 1018 self._rev = None
1019 1019 self._node = None
1020 1020 self._text = text
1021 1021 if date:
1022 1022 self._date = dateutil.parsedate(date)
1023 1023 if user:
1024 1024 self._user = user
1025 1025 if changes:
1026 1026 self._status = changes
1027 1027
1028 1028 self._extra = {}
1029 1029 if extra:
1030 1030 self._extra = extra.copy()
1031 1031 if 'branch' not in self._extra:
1032 1032 try:
1033 1033 branch = encoding.fromlocal(self._repo.dirstate.branch())
1034 1034 except UnicodeDecodeError:
1035 1035 raise error.Abort(_('branch name not in UTF-8!'))
1036 1036 self._extra['branch'] = branch
1037 1037 if self._extra['branch'] == '':
1038 1038 self._extra['branch'] = 'default'
1039 1039
1040 1040 def __bytes__(self):
1041 1041 return bytes(self._parents[0]) + "+"
1042 1042
1043 1043 __str__ = encoding.strmethod(__bytes__)
1044 1044
1045 1045 def __nonzero__(self):
1046 1046 return True
1047 1047
1048 1048 __bool__ = __nonzero__
1049 1049
1050 1050 def _buildflagfunc(self):
1051 1051 # Create a fallback function for getting file flags when the
1052 1052 # filesystem doesn't support them
1053 1053
1054 1054 copiesget = self._repo.dirstate.copies().get
1055 1055 parents = self.parents()
1056 1056 if len(parents) < 2:
1057 1057 # when we have one parent, it's easy: copy from parent
1058 1058 man = parents[0].manifest()
1059 1059 def func(f):
1060 1060 f = copiesget(f, f)
1061 1061 return man.flags(f)
1062 1062 else:
1063 1063 # merges are tricky: we try to reconstruct the unstored
1064 1064 # result from the merge (issue1802)
1065 1065 p1, p2 = parents
1066 1066 pa = p1.ancestor(p2)
1067 1067 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1068 1068
1069 1069 def func(f):
1070 1070 f = copiesget(f, f) # may be wrong for merges with copies
1071 1071 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1072 1072 if fl1 == fl2:
1073 1073 return fl1
1074 1074 if fl1 == fla:
1075 1075 return fl2
1076 1076 if fl2 == fla:
1077 1077 return fl1
1078 1078 return '' # punt for conflicts
1079 1079
1080 1080 return func
1081 1081
1082 1082 @propertycache
1083 1083 def _flagfunc(self):
1084 1084 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1085 1085
1086 1086 @propertycache
1087 1087 def _status(self):
1088 1088 return self._repo.status()
1089 1089
1090 1090 @propertycache
1091 1091 def _user(self):
1092 1092 return self._repo.ui.username()
1093 1093
1094 1094 @propertycache
1095 1095 def _date(self):
1096 1096 ui = self._repo.ui
1097 1097 date = ui.configdate('devel', 'default-date')
1098 1098 if date is None:
1099 1099 date = dateutil.makedate()
1100 1100 return date
1101 1101
1102 1102 def subrev(self, subpath):
1103 1103 return None
1104 1104
1105 1105 def manifestnode(self):
1106 1106 return None
1107 1107 def user(self):
1108 1108 return self._user or self._repo.ui.username()
1109 1109 def date(self):
1110 1110 return self._date
1111 1111 def description(self):
1112 1112 return self._text
1113 1113 def files(self):
1114 1114 return sorted(self._status.modified + self._status.added +
1115 1115 self._status.removed)
1116 1116
1117 1117 def modified(self):
1118 1118 return self._status.modified
1119 1119 def added(self):
1120 1120 return self._status.added
1121 1121 def removed(self):
1122 1122 return self._status.removed
1123 1123 def deleted(self):
1124 1124 return self._status.deleted
1125 1125 def branch(self):
1126 1126 return encoding.tolocal(self._extra['branch'])
1127 1127 def closesbranch(self):
1128 1128 return 'close' in self._extra
1129 1129 def extra(self):
1130 1130 return self._extra
1131 1131
1132 1132 def isinmemory(self):
1133 1133 return False
1134 1134
1135 1135 def tags(self):
1136 1136 return []
1137 1137
1138 1138 def bookmarks(self):
1139 1139 b = []
1140 1140 for p in self.parents():
1141 1141 b.extend(p.bookmarks())
1142 1142 return b
1143 1143
1144 1144 def phase(self):
1145 1145 phase = phases.draft # default phase to draft
1146 1146 for p in self.parents():
1147 1147 phase = max(phase, p.phase())
1148 1148 return phase
1149 1149
1150 1150 def hidden(self):
1151 1151 return False
1152 1152
1153 1153 def children(self):
1154 1154 return []
1155 1155
1156 1156 def flags(self, path):
1157 1157 if r'_manifest' in self.__dict__:
1158 1158 try:
1159 1159 return self._manifest.flags(path)
1160 1160 except KeyError:
1161 1161 return ''
1162 1162
1163 1163 try:
1164 1164 return self._flagfunc(path)
1165 1165 except OSError:
1166 1166 return ''
1167 1167
1168 1168 def ancestor(self, c2):
1169 1169 """return the "best" ancestor context of self and c2"""
1170 1170 return self._parents[0].ancestor(c2) # punt on two parents for now
1171 1171
1172 1172 def walk(self, match):
1173 1173 '''Generates matching file names.'''
1174 1174 return sorted(self._repo.dirstate.walk(match,
1175 1175 subrepos=sorted(self.substate),
1176 1176 unknown=True, ignored=False))
1177 1177
1178 1178 def matches(self, match):
1179 1179 ds = self._repo.dirstate
1180 1180 return sorted(f for f in ds.matches(match) if ds[f] != 'r')
1181 1181
1182 1182 def ancestors(self):
1183 1183 for p in self._parents:
1184 1184 yield p
1185 1185 for a in self._repo.changelog.ancestors(
1186 1186 [p.rev() for p in self._parents]):
1187 1187 yield self._repo[a]
1188 1188
1189 1189 def markcommitted(self, node):
1190 1190 """Perform post-commit cleanup necessary after committing this ctx
1191 1191
1192 1192 Specifically, this updates backing stores this working context
1193 1193 wraps to reflect the fact that the changes reflected by this
1194 1194 workingctx have been committed. For example, it marks
1195 1195 modified and added files as normal in the dirstate.
1196 1196
1197 1197 """
1198 1198
1199 1199 with self._repo.dirstate.parentchange():
1200 1200 for f in self.modified() + self.added():
1201 1201 self._repo.dirstate.normal(f)
1202 1202 for f in self.removed():
1203 1203 self._repo.dirstate.drop(f)
1204 1204 self._repo.dirstate.setparents(node)
1205 1205
1206 1206 # write changes out explicitly, because nesting wlock at
1207 1207 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1208 1208 # from immediately doing so for subsequent changing files
1209 1209 self._repo.dirstate.write(self._repo.currenttransaction())
1210 1210
1211 1211 def dirty(self, missing=False, merge=True, branch=True):
1212 1212 return False
1213 1213
1214 1214 class workingctx(committablectx):
1215 1215 """A workingctx object makes access to data related to
1216 1216 the current working directory convenient.
1217 1217 date - any valid date string or (unixtime, offset), or None.
1218 1218 user - username string, or None.
1219 1219 extra - a dictionary of extra values, or None.
1220 1220 changes - a list of file lists as returned by localrepo.status()
1221 1221 or None to use the repository status.
1222 1222 """
1223 1223 def __init__(self, repo, text="", user=None, date=None, extra=None,
1224 1224 changes=None):
1225 1225 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1226 1226
1227 1227 def __iter__(self):
1228 1228 d = self._repo.dirstate
1229 1229 for f in d:
1230 1230 if d[f] != 'r':
1231 1231 yield f
1232 1232
1233 1233 def __contains__(self, key):
1234 1234 return self._repo.dirstate[key] not in "?r"
1235 1235
1236 1236 def hex(self):
1237 1237 return hex(wdirid)
1238 1238
1239 1239 @propertycache
1240 1240 def _parents(self):
1241 1241 p = self._repo.dirstate.parents()
1242 1242 if p[1] == nullid:
1243 1243 p = p[:-1]
1244 return [self._repo[x] for x in p]
1244 # use unfiltered repo to delay/avoid loading obsmarkers
1245 unfi = self._repo.unfiltered()
1246 return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]
1245 1247
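# Per the commit message ("repo: move unfiltered-repo optimization to
# workingctx") and the inline comment above: building the parent
# changectx objects straight from the unfiltered changelog lets a bare
# workingctx avoid the repoview filtering that a plain self._repo[x]
# lookup would trigger, delaying or skipping the load of obsolescence
# markers.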
1246 1248 def _fileinfo(self, path):
1247 1249 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1248 1250 self._manifest
1249 1251 return super(workingctx, self)._fileinfo(path)
1250 1252
1251 1253 def filectx(self, path, filelog=None):
1252 1254 """get a file context from the working directory"""
1253 1255 return workingfilectx(self._repo, path, workingctx=self,
1254 1256 filelog=filelog)
1255 1257
1256 1258 def dirty(self, missing=False, merge=True, branch=True):
1257 1259 "check whether a working directory is modified"
1258 1260 # check subrepos first
1259 1261 for s in sorted(self.substate):
1260 1262 if self.sub(s).dirty(missing=missing):
1261 1263 return True
1262 1264 # check current working dir
1263 1265 return ((merge and self.p2()) or
1264 1266 (branch and self.branch() != self.p1().branch()) or
1265 1267 self.modified() or self.added() or self.removed() or
1266 1268 (missing and self.deleted()))
1267 1269
1268 1270 def add(self, list, prefix=""):
1269 1271 with self._repo.wlock():
1270 1272 ui, ds = self._repo.ui, self._repo.dirstate
1271 1273 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1272 1274 rejected = []
1273 1275 lstat = self._repo.wvfs.lstat
1274 1276 for f in list:
1275 1277 # ds.pathto() returns an absolute file path when this is invoked from
1276 1278 # the keyword extension. That gets flagged as non-portable on
1277 1279 # Windows, since it contains the drive letter and colon.
1278 1280 scmutil.checkportable(ui, os.path.join(prefix, f))
1279 1281 try:
1280 1282 st = lstat(f)
1281 1283 except OSError:
1282 1284 ui.warn(_("%s does not exist!\n") % uipath(f))
1283 1285 rejected.append(f)
1284 1286 continue
1285 1287 limit = ui.configbytes('ui', 'large-file-limit')
1286 1288 if limit != 0 and st.st_size > limit:
1287 1289 ui.warn(_("%s: up to %d MB of RAM may be required "
1288 1290 "to manage this file\n"
1289 1291 "(use 'hg revert %s' to cancel the "
1290 1292 "pending addition)\n")
1291 1293 % (f, 3 * st.st_size // 1000000, uipath(f)))
1292 1294 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1293 1295 ui.warn(_("%s not added: only files and symlinks "
1294 1296 "supported currently\n") % uipath(f))
1295 1297 rejected.append(f)
1296 1298 elif ds[f] in 'amn':
1297 1299 ui.warn(_("%s already tracked!\n") % uipath(f))
1298 1300 elif ds[f] == 'r':
1299 1301 ds.normallookup(f)
1300 1302 else:
1301 1303 ds.add(f)
1302 1304 return rejected
1303 1305
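# A hedged usage sketch for add() above (the file name is an assumption):
#
#   wctx = repo[None]                    # the working directory context
#   rejected = wctx.add([b'newfile.txt'])
#   # paths that could not be added (missing files, unsupported types)
#   # come back in `rejected`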
1304 1306 def forget(self, files, prefix=""):
1305 1307 with self._repo.wlock():
1306 1308 ds = self._repo.dirstate
1307 1309 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1308 1310 rejected = []
1309 1311 for f in files:
1310 1312 if f not in self._repo.dirstate:
1311 1313 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1312 1314 rejected.append(f)
1313 1315 elif self._repo.dirstate[f] != 'a':
1314 1316 self._repo.dirstate.remove(f)
1315 1317 else:
1316 1318 self._repo.dirstate.drop(f)
1317 1319 return rejected
1318 1320
1319 1321 def undelete(self, list):
1320 1322 pctxs = self.parents()
1321 1323 with self._repo.wlock():
1322 1324 ds = self._repo.dirstate
1323 1325 for f in list:
1324 1326 if self._repo.dirstate[f] != 'r':
1325 1327 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1326 1328 else:
1327 1329 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1328 1330 t = fctx.data()
1329 1331 self._repo.wwrite(f, t, fctx.flags())
1330 1332 self._repo.dirstate.normal(f)
1331 1333
1332 1334 def copy(self, source, dest):
1333 1335 try:
1334 1336 st = self._repo.wvfs.lstat(dest)
1335 1337 except OSError as err:
1336 1338 if err.errno != errno.ENOENT:
1337 1339 raise
1338 1340 self._repo.ui.warn(_("%s does not exist!\n")
1339 1341 % self._repo.dirstate.pathto(dest))
1340 1342 return
1341 1343 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1342 1344 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1343 1345 "symbolic link\n")
1344 1346 % self._repo.dirstate.pathto(dest))
1345 1347 else:
1346 1348 with self._repo.wlock():
1347 1349 if self._repo.dirstate[dest] in '?':
1348 1350 self._repo.dirstate.add(dest)
1349 1351 elif self._repo.dirstate[dest] in 'r':
1350 1352 self._repo.dirstate.normallookup(dest)
1351 1353 self._repo.dirstate.copy(source, dest)
1352 1354
1353 1355 def match(self, pats=None, include=None, exclude=None, default='glob',
1354 1356 listsubrepos=False, badfn=None):
1355 1357 r = self._repo
1356 1358
1357 1359 # Only a case-insensitive filesystem needs magic to translate user input
1358 1360 # to actual case in the filesystem.
1359 1361 icasefs = not util.fscasesensitive(r.root)
1360 1362 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1361 1363 default, auditor=r.auditor, ctx=self,
1362 1364 listsubrepos=listsubrepos, badfn=badfn,
1363 1365 icasefs=icasefs)
1364 1366
1365 1367 def _filtersuspectsymlink(self, files):
1366 1368 if not files or self._repo.dirstate._checklink:
1367 1369 return files
1368 1370
1369 1371 # Symlink placeholders may get non-symlink-like contents
1370 1372 # via user error or dereferencing by NFS or Samba servers,
1371 1373 # so we filter out any placeholders that don't look like a
1372 1374 # symlink
1373 1375 sane = []
1374 1376 for f in files:
1375 1377 if self.flags(f) == 'l':
1376 1378 d = self[f].data()
1377 1379 if (d == '' or len(d) >= 1024 or '\n' in d
1378 1380 or stringutil.binary(d)):
1379 1381 self._repo.ui.debug('ignoring suspect symlink placeholder'
1380 1382 ' "%s"\n' % f)
1381 1383 continue
1382 1384 sane.append(f)
1383 1385 return sane
1384 1386
1385 1387 def _checklookup(self, files):
1386 1388 # check for any possibly clean files
1387 1389 if not files:
1388 1390 return [], [], []
1389 1391
1390 1392 modified = []
1391 1393 deleted = []
1392 1394 fixup = []
1393 1395 pctx = self._parents[0]
1394 1396 # do a full compare of any files that might have changed
1395 1397 for f in sorted(files):
1396 1398 try:
1397 1399 # This will return True for a file that got replaced by a
1398 1400 # directory in the interim, but fixing that is pretty hard.
1399 1401 if (f not in pctx or self.flags(f) != pctx.flags(f)
1400 1402 or pctx[f].cmp(self[f])):
1401 1403 modified.append(f)
1402 1404 else:
1403 1405 fixup.append(f)
1404 1406 except (IOError, OSError):
1405 1407 # A file became inaccessible in between? Mark it as deleted,
1406 1408 # matching dirstate behavior (issue5584).
1407 1409 # The dirstate has more complex behavior around whether a
1408 1410 # missing file matches a directory, etc, but we don't need to
1409 1411 # bother with that: if f has made it to this point, we're sure
1410 1412 # it's in the dirstate.
1411 1413 deleted.append(f)
1412 1414
1413 1415 return modified, deleted, fixup
1414 1416
1415 1417 def _poststatusfixup(self, status, fixup):
1416 1418 """update dirstate for files that are actually clean"""
1417 1419 poststatus = self._repo.postdsstatus()
1418 1420 if fixup or poststatus:
1419 1421 try:
1420 1422 oldid = self._repo.dirstate.identity()
1421 1423
1422 1424 # updating the dirstate is optional
1423 1425 # so we don't wait on the lock
1424 1426 # wlock can invalidate the dirstate, so cache normal _after_
1425 1427 # taking the lock
1426 1428 with self._repo.wlock(False):
1427 1429 if self._repo.dirstate.identity() == oldid:
1428 1430 if fixup:
1429 1431 normal = self._repo.dirstate.normal
1430 1432 for f in fixup:
1431 1433 normal(f)
1432 1434 # write changes out explicitly, because nesting
1433 1435 # wlock at runtime may prevent 'wlock.release()'
1434 1436 # after this block from doing so for subsequent
1435 1437 # changing files
1436 1438 tr = self._repo.currenttransaction()
1437 1439 self._repo.dirstate.write(tr)
1438 1440
1439 1441 if poststatus:
1440 1442 for ps in poststatus:
1441 1443 ps(self, status)
1442 1444 else:
1443 1445 # in this case, writing changes out breaks
1444 1446 # consistency, because .hg/dirstate was
1445 1447 # already changed simultaneously after last
1446 1448 # caching (see also issue5584 for detail)
1447 1449 self._repo.ui.debug('skip updating dirstate: '
1448 1450 'identity mismatch\n')
1449 1451 except error.LockError:
1450 1452 pass
1451 1453 finally:
1452 1454 # Even if the wlock couldn't be grabbed, clear out the list.
1453 1455 self._repo.clearpostdsstatus()
1454 1456
1455 1457 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1456 1458 '''Gets the status from the dirstate -- internal use only.'''
1457 1459 subrepos = []
1458 1460 if '.hgsub' in self:
1459 1461 subrepos = sorted(self.substate)
1460 1462 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1461 1463 clean=clean, unknown=unknown)
1462 1464
1463 1465 # check for any possibly clean files
1464 1466 fixup = []
1465 1467 if cmp:
1466 1468 modified2, deleted2, fixup = self._checklookup(cmp)
1467 1469 s.modified.extend(modified2)
1468 1470 s.deleted.extend(deleted2)
1469 1471
1470 1472 if fixup and clean:
1471 1473 s.clean.extend(fixup)
1472 1474
1473 1475 self._poststatusfixup(s, fixup)
1474 1476
1475 1477 if match.always():
1476 1478 # cache for performance
1477 1479 if s.unknown or s.ignored or s.clean:
1478 1480 # "_status" is cached with list*=False in the normal route
1479 1481 self._status = scmutil.status(s.modified, s.added, s.removed,
1480 1482 s.deleted, [], [], [])
1481 1483 else:
1482 1484 self._status = s
1483 1485
1484 1486 return s
1485 1487
1486 1488 @propertycache
1487 1489 def _manifest(self):
1488 1490 """generate a manifest corresponding to the values in self._status
1489 1491
1490 1492 This reuses the file nodeid from the parent, but we use special node
1491 1493 identifiers for added and modified files. This is used by manifest
1492 1494 merge to see that files are different and by update logic to avoid
1493 1495 deleting newly added files.
1494 1496 """
1495 1497 return self._buildstatusmanifest(self._status)
1496 1498
1497 1499 def _buildstatusmanifest(self, status):
1498 1500 """Builds a manifest that includes the given status results."""
1499 1501 parents = self.parents()
1500 1502
1501 1503 man = parents[0].manifest().copy()
1502 1504
1503 1505 ff = self._flagfunc
1504 1506 for i, l in ((addednodeid, status.added),
1505 1507 (modifiednodeid, status.modified)):
1506 1508 for f in l:
1507 1509 man[f] = i
1508 1510 try:
1509 1511 man.setflag(f, ff(f))
1510 1512 except OSError:
1511 1513 pass
1512 1514
1513 1515 for f in status.deleted + status.removed:
1514 1516 if f in man:
1515 1517 del man[f]
1516 1518
1517 1519 return man
1518 1520
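# Editorial sketch (sentinel names come from mercurial.node, paths are
# assumptions): the manifest built above substitutes sentinel nodeids so
# merge and update logic can tell working-copy changes from stored file
# revisions:
#
#   man[b'added.txt'] == addednodeid         # added in the wdir
#   man[b'changed.txt'] == modifiednodeid    # modified in the wdir
#   b'removed.txt' not in man                # deleted/removed are dropped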
1519 1521 def _buildstatus(self, other, s, match, listignored, listclean,
1520 1522 listunknown):
1521 1523 """build a status with respect to another context
1522 1524
1523 1525 This includes logic for maintaining the fast path of status when
1524 1526 comparing the working directory against its parent, which is to skip
1525 1527 building a new manifest if self (working directory) is not comparing
1526 1528 against its parent (repo['.']).
1527 1529 """
1528 1530 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1529 1531 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1530 1532 # might have accidentally ended up with the entire contents of the file
1531 1533 # they are supposed to be linking to.
1532 1534 s.modified[:] = self._filtersuspectsymlink(s.modified)
1533 1535 if other != self._repo['.']:
1534 1536 s = super(workingctx, self)._buildstatus(other, s, match,
1535 1537 listignored, listclean,
1536 1538 listunknown)
1537 1539 return s
1538 1540
1539 1541 def _matchstatus(self, other, match):
1540 1542 """override the match method with a filter for directory patterns
1541 1543
1542 1544 We use inheritance to customize the match.bad method only in the case
1543 1545 of workingctx, since it applies only to the working directory when
1544 1546 comparing against the parent changeset.
1545 1547
1546 1548 If we aren't comparing against the working directory's parent, then we
1547 1549 just use the default match object sent to us.
1548 1550 """
1549 1551 if other != self._repo['.']:
1550 1552 def bad(f, msg):
1551 1553 # 'f' may be a directory pattern from 'match.files()',
1552 1554 # so 'f not in ctx1' is not enough
1553 1555 if f not in other and not other.hasdir(f):
1554 1556 self._repo.ui.warn('%s: %s\n' %
1555 1557 (self._repo.dirstate.pathto(f), msg))
1556 1558 match.bad = bad
1557 1559 return match
1558 1560
1559 1561 def markcommitted(self, node):
1560 1562 super(workingctx, self).markcommitted(node)
1561 1563
1562 1564 sparse.aftercommit(self._repo, node)
1563 1565
1564 1566 class committablefilectx(basefilectx):
1565 1567 """A committablefilectx provides common functionality for a file context
1566 1568 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1567 1569 def __init__(self, repo, path, filelog=None, ctx=None):
1568 1570 self._repo = repo
1569 1571 self._path = path
1570 1572 self._changeid = None
1571 1573 self._filerev = self._filenode = None
1572 1574
1573 1575 if filelog is not None:
1574 1576 self._filelog = filelog
1575 1577 if ctx:
1576 1578 self._changectx = ctx
1577 1579
1578 1580 def __nonzero__(self):
1579 1581 return True
1580 1582
1581 1583 __bool__ = __nonzero__
1582 1584
1583 1585 def linkrev(self):
1584 1586 # linked to self._changectx no matter if file is modified or not
1585 1587 return self.rev()
1586 1588
1587 1589 def parents(self):
1588 1590 '''return parent filectxs, following copies if necessary'''
1589 1591 def filenode(ctx, path):
1590 1592 return ctx._manifest.get(path, nullid)
1591 1593
1592 1594 path = self._path
1593 1595 fl = self._filelog
1594 1596 pcl = self._changectx._parents
1595 1597 renamed = self.renamed()
1596 1598
1597 1599 if renamed:
1598 1600 pl = [renamed + (None,)]
1599 1601 else:
1600 1602 pl = [(path, filenode(pcl[0], path), fl)]
1601 1603
1602 1604 for pc in pcl[1:]:
1603 1605 pl.append((path, filenode(pc, path), fl))
1604 1606
1605 1607 return [self._parentfilectx(p, fileid=n, filelog=l)
1606 1608 for p, n, l in pl if n != nullid]
1607 1609
1608 1610 def children(self):
1609 1611 return []
1610 1612
1611 1613 class workingfilectx(committablefilectx):
1612 1614 """A workingfilectx object makes access to data related to a particular
1613 1615 file in the working directory convenient."""
1614 1616 def __init__(self, repo, path, filelog=None, workingctx=None):
1615 1617 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1616 1618
1617 1619 @propertycache
1618 1620 def _changectx(self):
1619 1621 return workingctx(self._repo)
1620 1622
1621 1623 def data(self):
1622 1624 return self._repo.wread(self._path)
1623 1625 def renamed(self):
1624 1626 rp = self._repo.dirstate.copied(self._path)
1625 1627 if not rp:
1626 1628 return None
1627 1629 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1628 1630
1629 1631 def size(self):
1630 1632 return self._repo.wvfs.lstat(self._path).st_size
1631 1633 def date(self):
1632 1634 t, tz = self._changectx.date()
1633 1635 try:
1634 1636 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
1635 1637 except OSError as err:
1636 1638 if err.errno != errno.ENOENT:
1637 1639 raise
1638 1640 return (t, tz)
1639 1641
1640 1642 def exists(self):
1641 1643 return self._repo.wvfs.exists(self._path)
1642 1644
1643 1645 def lexists(self):
1644 1646 return self._repo.wvfs.lexists(self._path)
1645 1647
1646 1648 def audit(self):
1647 1649 return self._repo.wvfs.audit(self._path)
1648 1650
1649 1651 def cmp(self, fctx):
1650 1652 """compare with other file context
1651 1653
1652 1654 returns True if different than fctx.
1653 1655 """
1654 1656 # fctx should be a filectx (not a workingfilectx)
1655 1657 # invert comparison to reuse the same code path
1656 1658 return fctx.cmp(self)
1657 1659
1658 1660 def remove(self, ignoremissing=False):
1659 1661 """wraps unlink for a repo's working directory"""
1660 1662 rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
1661 1663 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
1662 1664 rmdir=rmdir)
1663 1665
1664 1666 def write(self, data, flags, backgroundclose=False, **kwargs):
1665 1667 """wraps repo.wwrite"""
1666 1668 self._repo.wwrite(self._path, data, flags,
1667 1669 backgroundclose=backgroundclose,
1668 1670 **kwargs)
1669 1671
1670 1672 def markcopied(self, src):
1671 1673 """marks this file a copy of `src`"""
1672 1674 if self._repo.dirstate[self._path] in "nma":
1673 1675 self._repo.dirstate.copy(src, self._path)
1674 1676
1675 1677 def clearunknown(self):
1676 1678 """Removes conflicting items in the working directory so that
1677 1679 ``write()`` can be called successfully.
1678 1680 """
1679 1681 wvfs = self._repo.wvfs
1680 1682 f = self._path
1681 1683 wvfs.audit(f)
1682 1684 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1683 1685 # remove files under the directory as they should already have
1684 1686 # been warned about and backed up
1685 1687 if wvfs.isdir(f) and not wvfs.islink(f):
1686 1688 wvfs.rmtree(f, forcibly=True)
1687 1689 for p in reversed(list(util.finddirs(f))):
1688 1690 if wvfs.isfileorlink(p):
1689 1691 wvfs.unlink(p)
1690 1692 break
1691 1693 else:
1692 1694 # don't remove files if path conflicts are not processed
1693 1695 if wvfs.isdir(f) and not wvfs.islink(f):
1694 1696 wvfs.removedirs(f)
1695 1697
1696 1698 def setflags(self, l, x):
1697 1699 self._repo.wvfs.setflags(self._path, l, x)
1698 1700
1699 1701 class overlayworkingctx(committablectx):
1700 1702 """Wraps another mutable context with a write-back cache that can be
1701 1703 converted into a commit context.
1702 1704
1703 1705 self._cache[path] maps to a dict with keys: {
1704 1706 'exists': bool?
1705 1707 'date': date?
1706 1708 'data': str?
1707 1709 'flags': str?
1708 1710 'copied': str? (path or None)
1709 1711 }
1710 1712 If `exists` is True, `flags` and `date` must be non-None. If `exists`
1711 1713 is False, the file was deleted.
1712 1714 """
1713 1715
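As a hedged illustration of the cache shape described above (all values hypothetical), one dirty entry produced by write() might look like:

entry = {
    'exists': True,                 # file is present in the overlay
    'data': b'new file contents',   # full contents kept in memory
    'date': (1540000000, 0),        # dateutil.makedate()-style (unixtime, tz)
    'flags': 'x',                   # 'l' symlink, 'x' executable, '' plain
    'copied': None,                 # source path once markcopied() is called
}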
1714 1716 def __init__(self, repo):
1715 1717 super(overlayworkingctx, self).__init__(repo)
1716 1718 self.clean()
1717 1719
1718 1720 def setbase(self, wrappedctx):
1719 1721 self._wrappedctx = wrappedctx
1720 1722 self._parents = [wrappedctx]
1721 1723 # Drop old manifest cache as it is now out of date.
1722 1724 # This is necessary when, e.g., rebasing several nodes with one
1723 1725 # ``overlayworkingctx`` (e.g. with --collapse).
1724 1726 util.clearcachedproperty(self, '_manifest')
1725 1727
1726 1728 def data(self, path):
1727 1729 if self.isdirty(path):
1728 1730 if self._cache[path]['exists']:
1729 1731 if self._cache[path]['data']:
1730 1732 return self._cache[path]['data']
1731 1733 else:
1732 1734 # Must fall back here, too, because we only set flags.
1733 1735 return self._wrappedctx[path].data()
1734 1736 else:
1735 1737 raise error.ProgrammingError("No such file or directory: %s" %
1736 1738 path)
1737 1739 else:
1738 1740 return self._wrappedctx[path].data()
1739 1741
1740 1742 @propertycache
1741 1743 def _manifest(self):
1742 1744 parents = self.parents()
1743 1745 man = parents[0].manifest().copy()
1744 1746
1745 1747 flag = self._flagfunc
1746 1748 for path in self.added():
1747 1749 man[path] = addednodeid
1748 1750 man.setflag(path, flag(path))
1749 1751 for path in self.modified():
1750 1752 man[path] = modifiednodeid
1751 1753 man.setflag(path, flag(path))
1752 1754 for path in self.removed():
1753 1755 del man[path]
1754 1756 return man
1755 1757
1756 1758 @propertycache
1757 1759 def _flagfunc(self):
1758 1760 def f(path):
1759 1761 return self._cache[path]['flags']
1760 1762 return f
1761 1763
1762 1764 def files(self):
1763 1765 return sorted(self.added() + self.modified() + self.removed())
1764 1766
1765 1767 def modified(self):
1766 1768 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1767 1769 self._existsinparent(f)]
1768 1770
1769 1771 def added(self):
1770 1772 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1771 1773 not self._existsinparent(f)]
1772 1774
1773 1775 def removed(self):
1774 1776 return [f for f in self._cache.keys() if
1775 1777 not self._cache[f]['exists'] and self._existsinparent(f)]
1776 1778
1777 1779 def isinmemory(self):
1778 1780 return True
1779 1781
1780 1782 def filedate(self, path):
1781 1783 if self.isdirty(path):
1782 1784 return self._cache[path]['date']
1783 1785 else:
1784 1786 return self._wrappedctx[path].date()
1785 1787
1786 1788 def markcopied(self, path, origin):
1787 1789 if self.isdirty(path):
1788 1790 self._cache[path]['copied'] = origin
1789 1791 else:
1790 1792 raise error.ProgrammingError('markcopied() called on clean context')
1791 1793
1792 1794 def copydata(self, path):
1793 1795 if self.isdirty(path):
1794 1796 return self._cache[path]['copied']
1795 1797 else:
1796 1798 raise error.ProgrammingError('copydata() called on clean context')
1797 1799
1798 1800 def flags(self, path):
1799 1801 if self.isdirty(path):
1800 1802 if self._cache[path]['exists']:
1801 1803 return self._cache[path]['flags']
1802 1804 else:
1803 1805 raise error.ProgrammingError("No such file or directory: %s" %
1804 1806 path)
1805 1807 else:
1806 1808 return self._wrappedctx[path].flags()
1807 1809
1808 1810 def _existsinparent(self, path):
1809 1811 try:
1810 1812 # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
1811 1813 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1812 1814 # with an ``exists()`` function.
1813 1815 self._wrappedctx[path]
1814 1816 return True
1815 1817 except error.ManifestLookupError:
1816 1818 return False
1817 1819
1818 1820 def _auditconflicts(self, path):
1819 1821 """Replicates conflict checks done by wvfs.write().
1820 1822
1821 1823 Since we never write to the filesystem and never call `applyupdates` in
1822 1824 IMM, we'll never check that a path is actually writable -- e.g., because
1823 1825 it adds `a/foo`, but `a` is actually a file in the other commit.
1824 1826 """
1825 1827 def fail(path, component):
1826 1828 # p1() is the base and we're receiving "writes" for p2()'s
1827 1829 # files.
1828 1830 if 'l' in self.p1()[component].flags():
1829 1831 raise error.Abort("error: %s conflicts with symlink %s "
1830 1832 "in %s." % (path, component,
1831 1833 self.p1().rev()))
1832 1834 else:
1833 1835 raise error.Abort("error: '%s' conflicts with file '%s' in "
1834 1836 "%s." % (path, component,
1835 1837 self.p1().rev()))
1836 1838
1837 1839 # Test that each new directory to be created to write this path from p2
1838 1840 # is not a file in p1.
1839 1841 components = path.split('/')
1840 1842 for i in pycompat.xrange(len(components)):
1841 1843 component = "/".join(components[0:i])
1842 1844 if component in self.p1() and self._cache[component]['exists']:
1843 1845 fail(path, component)
1844 1846
1845 1847 # Test the other direction -- that this path from p2 isn't a directory
1846 1848 # in p1 (test that p1 doesn't have any paths matching `path/*`).
1847 1849 match = matchmod.match('/', '', [path + '/'], default=b'relpath')
1848 1850 matches = self.p1().manifest().matches(match)
1849 1851 mfiles = matches.keys()
1850 1852 if len(mfiles) > 0:
1851 1853 if len(mfiles) == 1 and mfiles[0] == path:
1852 1854 return
1853 1855 # omit the files which are deleted in current IMM wctx
1854 1856 mfiles = [m for m in mfiles if self._cache[m]['exists']]
1855 1857 if not mfiles:
1856 1858 return
1857 1859 raise error.Abort("error: file '%s' cannot be written because "
1858 1860 " '%s/' is a folder in %s (containing %d "
1859 1861 "entries: %s)"
1860 1862 % (path, path, self.p1(), len(mfiles),
1861 1863 ', '.join(mfiles)))
1862 1864
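The prefix walk performed by _auditconflicts() can be shown standalone; this is a hedged sketch with a hypothetical helper name:

def dirprefixes(path):
    # yield each proper directory prefix of a '/'-separated path; if any
    # prefix is a *file* in p1, writing `path` must fail as shown above
    parts = path.split('/')
    for i in range(1, len(parts)):
        yield '/'.join(parts[:i])

assert list(dirprefixes('a/b/c')) == ['a', 'a/b']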
1863 1865 def write(self, path, data, flags='', **kwargs):
1864 1866 if data is None:
1865 1867 raise error.ProgrammingError("data must be non-None")
1866 1868 self._auditconflicts(path)
1867 1869 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1868 1870 flags=flags)
1869 1871
1870 1872 def setflags(self, path, l, x):
1871 1873 flag = ''
1872 1874 if l:
1873 1875 flag = 'l'
1874 1876 elif x:
1875 1877 flag = 'x'
1876 1878 self._markdirty(path, exists=True, date=dateutil.makedate(),
1877 1879 flags=flag)
1878 1880
1879 1881 def remove(self, path):
1880 1882 self._markdirty(path, exists=False)
1881 1883
1882 1884 def exists(self, path):
1883 1885 """exists behaves like `lexists`, but needs to follow symlinks and
1884 1886 return False if they are broken.
1885 1887 """
1886 1888 if self.isdirty(path):
1887 1889 # If this path exists and is a symlink, "follow" it by calling
1888 1890 # exists on the destination path.
1889 1891 if (self._cache[path]['exists'] and
1890 1892 'l' in self._cache[path]['flags']):
1891 1893 return self.exists(self._cache[path]['data'].strip())
1892 1894 else:
1893 1895 return self._cache[path]['exists']
1894 1896
1895 1897 return self._existsinparent(path)
1896 1898
1897 1899 def lexists(self, path):
1898 1900 """lexists returns True if the path exists"""
1899 1901 if self.isdirty(path):
1900 1902 return self._cache[path]['exists']
1901 1903
1902 1904 return self._existsinparent(path)
1903 1905
1904 1906 def size(self, path):
1905 1907 if self.isdirty(path):
1906 1908 if self._cache[path]['exists']:
1907 1909 return len(self._cache[path]['data'])
1908 1910 else:
1909 1911 raise error.ProgrammingError("No such file or directory: %s" %
1910 1912 path)
1911 1913 return self._wrappedctx[path].size()
1912 1914
1913 1915 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
1914 1916 user=None, editor=None):
1915 1917 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
1916 1918 committed.
1917 1919
1918 1920 ``text`` is the commit message.
1919 1921 ``parents`` (optional) are rev numbers.
1920 1922 """
1921 1923 # Default parents to the wrapped contexts' if not passed.
1922 1924 if parents is None:
1923 1925 parents = self._wrappedctx.parents()
1924 1926 if len(parents) == 1:
1925 1927 parents = (parents[0], None)
1926 1928
1927 1929 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
1928 1930 if parents[1] is None:
1929 1931 parents = (self._repo[parents[0]], None)
1930 1932 else:
1931 1933 parents = (self._repo[parents[0]], self._repo[parents[1]])
1932 1934
1933 1935 files = self._cache.keys()
1934 1936 def getfile(repo, memctx, path):
1935 1937 if self._cache[path]['exists']:
1936 1938 return memfilectx(repo, memctx, path,
1937 1939 self._cache[path]['data'],
1938 1940 'l' in self._cache[path]['flags'],
1939 1941 'x' in self._cache[path]['flags'],
1940 1942 self._cache[path]['copied'])
1941 1943 else:
1942 1944 # Returning None, but including the path in `files`, is
1943 1945 # necessary for memctx to register a deletion.
1944 1946 return None
1945 1947 return memctx(self._repo, parents, text, files, getfile, date=date,
1946 1948 extra=extra, user=user, branch=branch, editor=editor)
1947 1949
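A hedged usage sketch tying the overlay together (function name hypothetical; the parent rev is passed explicitly as a rev number, as the docstring above requires):

def commitoverlay(repo, path, data, message):
    # write one file into the overlay and turn it into a real commit
    wctx = overlayworkingctx(repo)
    wctx.setbase(repo['.'])
    wctx.write(path, data, flags='')
    mctx = wctx.tomemctx(message, parents=(repo['.'].rev(), None))
    return mctx.commit()    # hands off to repo.commitctx()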
1948 1950 def isdirty(self, path):
1949 1951 return path in self._cache
1950 1952
1951 1953 def isempty(self):
1952 1954 # We need to discard any keys that are actually clean before the empty
1953 1955 # commit check.
1954 1956 self._compact()
1955 1957 return len(self._cache) == 0
1956 1958
1957 1959 def clean(self):
1958 1960 self._cache = {}
1959 1961
1960 1962 def _compact(self):
1961 1963 """Removes keys from the cache that are actually clean, by comparing
1962 1964 them with the underlying context.
1963 1965
1964 1966 This can occur during the merge process, e.g. by passing --tool :local
1965 1967 to resolve a conflict.
1966 1968 """
1967 1969 keys = []
1968 1970 for path in self._cache.keys():
1969 1971 cache = self._cache[path]
1970 1972 try:
1971 1973 underlying = self._wrappedctx[path]
1972 1974 if (underlying.data() == cache['data'] and
1973 1975 underlying.flags() == cache['flags']):
1974 1976 keys.append(path)
1975 1977 except error.ManifestLookupError:
1976 1978 # Path not in the underlying manifest (created).
1977 1979 continue
1978 1980
1979 1981 for path in keys:
1980 1982 del self._cache[path]
1981 1983 return keys
1982 1984
1983 1985 def _markdirty(self, path, exists, data=None, date=None, flags=''):
1984 1986 # data not provided, let's see if we already have some; if not, let's
1985 1987 # grab it from our underlying context, so that we always have data if
1986 1988 # the file is marked as existing.
1987 1989 if exists and data is None:
1988 1990 oldentry = self._cache.get(path) or {}
1989 1991 data = oldentry.get('data') or self._wrappedctx[path].data()
1990 1992
1991 1993 self._cache[path] = {
1992 1994 'exists': exists,
1993 1995 'data': data,
1994 1996 'date': date,
1995 1997 'flags': flags,
1996 1998 'copied': None,
1997 1999 }
1998 2000
1999 2001 def filectx(self, path, filelog=None):
2000 2002 return overlayworkingfilectx(self._repo, path, parent=self,
2001 2003 filelog=filelog)
2002 2004
2003 2005 class overlayworkingfilectx(committablefilectx):
2004 2006 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2005 2007 cache, which can be flushed through later by calling ``flush()``."""
2006 2008
2007 2009 def __init__(self, repo, path, filelog=None, parent=None):
2008 2010 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2009 2011 parent)
2010 2012 self._repo = repo
2011 2013 self._parent = parent
2012 2014 self._path = path
2013 2015
2014 2016 def cmp(self, fctx):
2015 2017 return self.data() != fctx.data()
2016 2018
2017 2019 def changectx(self):
2018 2020 return self._parent
2019 2021
2020 2022 def data(self):
2021 2023 return self._parent.data(self._path)
2022 2024
2023 2025 def date(self):
2024 2026 return self._parent.filedate(self._path)
2025 2027
2026 2028 def exists(self):
2027 2029 return self.lexists()
2028 2030
2029 2031 def lexists(self):
2030 2032 return self._parent.exists(self._path)
2031 2033
2032 2034 def renamed(self):
2033 2035 path = self._parent.copydata(self._path)
2034 2036 if not path:
2035 2037 return None
2036 2038 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2037 2039
2038 2040 def size(self):
2039 2041 return self._parent.size(self._path)
2040 2042
2041 2043 def markcopied(self, origin):
2042 2044 self._parent.markcopied(self._path, origin)
2043 2045
2044 2046 def audit(self):
2045 2047 pass
2046 2048
2047 2049 def flags(self):
2048 2050 return self._parent.flags(self._path)
2049 2051
2050 2052 def setflags(self, islink, isexec):
2051 2053 return self._parent.setflags(self._path, islink, isexec)
2052 2054
2053 2055 def write(self, data, flags, backgroundclose=False, **kwargs):
2054 2056 return self._parent.write(self._path, data, flags, **kwargs)
2055 2057
2056 2058 def remove(self, ignoremissing=False):
2057 2059 return self._parent.remove(self._path)
2058 2060
2059 2061 def clearunknown(self):
2060 2062 pass
2061 2063
2062 2064 class workingcommitctx(workingctx):
2063 2065 """A workingcommitctx object makes access to data related to
2064 2066 the revision being committed convenient.
2065 2067
2066 2068 This hides changes in the working directory, if they aren't
2067 2069 committed in this context.
2068 2070 """
2069 2071 def __init__(self, repo, changes,
2070 2072 text="", user=None, date=None, extra=None):
2071 2073 super(workingcommitctx, self).__init__(repo, text, user, date, extra,
2072 2074 changes)
2073 2075
2074 2076 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2075 2077 """Return matched files only in ``self._status``
2076 2078
2077 2079 Uncommitted files appear "clean" via this context, even if
2078 2080 they aren't actually so in the working directory.
2079 2081 """
2080 2082 if clean:
2081 2083 clean = [f for f in self._manifest if f not in self._changedset]
2082 2084 else:
2083 2085 clean = []
2084 2086 return scmutil.status([f for f in self._status.modified if match(f)],
2085 2087 [f for f in self._status.added if match(f)],
2086 2088 [f for f in self._status.removed if match(f)],
2087 2089 [], [], [], clean)
2088 2090
2089 2091 @propertycache
2090 2092 def _changedset(self):
2091 2093 """Return the set of files changed in this context
2092 2094 """
2093 2095 changed = set(self._status.modified)
2094 2096 changed.update(self._status.added)
2095 2097 changed.update(self._status.removed)
2096 2098 return changed
2097 2099
2098 2100 def makecachingfilectxfn(func):
2099 2101 """Create a filectxfn that caches based on the path.
2100 2102
2101 2103 We can't use util.cachefunc because it uses all arguments as the cache
2102 2104 key and this creates a cycle since the arguments include the repo and
2103 2105 memctx.
2104 2106 """
2105 2107 cache = {}
2106 2108
2107 2109 def getfilectx(repo, memctx, path):
2108 2110 if path not in cache:
2109 2111 cache[path] = func(repo, memctx, path)
2110 2112 return cache[path]
2111 2113
2112 2114 return getfilectx
2113 2115
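A brief illustration of the memoization this provides (load function hypothetical): the second lookup of the same path never reaches the wrapped function.

calls = []
def load(repo, memctx, path):
    calls.append(path)
    return 'fctx-for-%s' % path

getfilectx = makecachingfilectxfn(load)
getfilectx(None, None, 'a')    # computes and caches
getfilectx(None, None, 'a')    # served from the cache
assert calls == ['a']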
2114 2116 def memfilefromctx(ctx):
2115 2117 """Given a context return a memfilectx for ctx[path]
2116 2118
2117 2119 This is a convenience method for building a memctx based on another
2118 2120 context.
2119 2121 """
2120 2122 def getfilectx(repo, memctx, path):
2121 2123 fctx = ctx[path]
2122 2124 # this is weird but apparently we only keep track of one parent
2123 2125 # (why not only store that instead of a tuple?)
2124 2126 copied = fctx.renamed()
2125 2127 if copied:
2126 2128 copied = copied[0]
2127 2129 return memfilectx(repo, memctx, path, fctx.data(),
2128 2130 islink=fctx.islink(), isexec=fctx.isexec(),
2129 2131 copied=copied)
2130 2132
2131 2133 return getfilectx
2132 2134
2133 2135 def memfilefrompatch(patchstore):
2134 2136 """Given a patch (e.g. patchstore object) return a memfilectx
2135 2137
2136 2138 This is a convenience method for building a memctx based on a patchstore.
2137 2139 """
2138 2140 def getfilectx(repo, memctx, path):
2139 2141 data, mode, copied = patchstore.getfile(path)
2140 2142 if data is None:
2141 2143 return None
2142 2144 islink, isexec = mode
2143 2145 return memfilectx(repo, memctx, path, data, islink=islink,
2144 2146 isexec=isexec, copied=copied)
2145 2147
2146 2148 return getfilectx
2147 2149
2148 2150 class memctx(committablectx):
2149 2151 """Use memctx to perform in-memory commits via localrepo.commitctx().
2150 2152
2151 2153 Revision information is supplied at initialization time, while the
2152 2154 related files' data is made available through a callback
2153 2155 mechanism. 'repo' is the current localrepo, 'parents' is a
2154 2156 sequence of two parent revision identifiers (pass None for every
2155 2157 missing parent), 'text' is the commit message and 'files' lists
2156 2158 names of files touched by the revision (normalized and relative to
2157 2159 repository root).
2158 2160
2159 2161 filectxfn(repo, memctx, path) is a callable receiving the
2160 2162 repository, the current memctx object and the normalized path of
2161 2163 requested file, relative to repository root. It is fired by the
2162 2164 commit function for every file in 'files', but calls order is
2163 2165 undefined. If the file is available in the revision being
2164 2166 committed (updated or added), filectxfn returns a memfilectx
2165 2167 object. If the file was removed, filectxfn returns None for recent
2166 2168 Mercurial. Moved files are represented by marking the source file
2167 2169 removed and the new file added with copy information (see
2168 2170 memfilectx).
2169 2171
2170 2172 user receives the committer name and defaults to current
2171 2173 repository username, date is the commit date in any format
2172 2174 supported by dateutil.parsedate() and defaults to current date, extra
2173 2175 is a dictionary of metadata or is left empty.
2174 2176 """
2175 2177
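A hedged sketch of the filectxfn protocol described above, built from a plain {path: bytes} dict (helper name hypothetical):

def makefilectxfn(filedata):
    def filectxfn(repo, memctx, path):
        if path not in filedata:
            return None    # tells memctx the path was removed
        return memfilectx(repo, memctx, path, filedata[path])
    return filectxfn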
2176 2178 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2177 2179 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2178 2180 # this field to determine what to do in filectxfn.
2179 2181 _returnnoneformissingfiles = True
2180 2182
2181 2183 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2182 2184 date=None, extra=None, branch=None, editor=False):
2183 2185 super(memctx, self).__init__(repo, text, user, date, extra)
2184 2186 self._rev = None
2185 2187 self._node = None
2186 2188 parents = [(p or nullid) for p in parents]
2187 2189 p1, p2 = parents
2188 2190 self._parents = [self._repo[p] for p in (p1, p2)]
2189 2191 files = sorted(set(files))
2190 2192 self._files = files
2191 2193 if branch is not None:
2192 2194 self._extra['branch'] = encoding.fromlocal(branch)
2193 2195 self.substate = {}
2194 2196
2195 2197 if isinstance(filectxfn, patch.filestore):
2196 2198 filectxfn = memfilefrompatch(filectxfn)
2197 2199 elif not callable(filectxfn):
2198 2200 # if store is not callable, wrap it in a function
2199 2201 filectxfn = memfilefromctx(filectxfn)
2200 2202
2201 2203 # memoizing increases performance for e.g. vcs convert scenarios.
2202 2204 self._filectxfn = makecachingfilectxfn(filectxfn)
2203 2205
2204 2206 if editor:
2205 2207 self._text = editor(self._repo, self, [])
2206 2208 self._repo.savecommitmessage(self._text)
2207 2209
2208 2210 def filectx(self, path, filelog=None):
2209 2211 """get a file context from the working directory
2210 2212
2211 2213 Returns None if file doesn't exist and should be removed."""
2212 2214 return self._filectxfn(self._repo, self, path)
2213 2215
2214 2216 def commit(self):
2215 2217 """commit context to the repo"""
2216 2218 return self._repo.commitctx(self)
2217 2219
2218 2220 @propertycache
2219 2221 def _manifest(self):
2220 2222 """generate a manifest based on the return values of filectxfn"""
2221 2223
2222 2224 # keep this simple for now; just worry about p1
2223 2225 pctx = self._parents[0]
2224 2226 man = pctx.manifest().copy()
2225 2227
2226 2228 for f in self._status.modified:
2227 2229 man[f] = modifiednodeid
2228 2230
2229 2231 for f in self._status.added:
2230 2232 man[f] = addednodeid
2231 2233
2232 2234 for f in self._status.removed:
2233 2235 if f in man:
2234 2236 del man[f]
2235 2237
2236 2238 return man
2237 2239
2238 2240 @propertycache
2239 2241 def _status(self):
2240 2242 """Calculate exact status from ``files`` specified at construction
2241 2243 """
2242 2244 man1 = self.p1().manifest()
2243 2245 p2 = self._parents[1]
2244 2246 # "1 < len(self._parents)" can't be used for checking
2245 2247 # existence of the 2nd parent, because "memctx._parents" is
2246 2248 # explicitly initialized with a list whose length is always 2.
2247 2249 if p2.node() != nullid:
2248 2250 man2 = p2.manifest()
2249 2251 managing = lambda f: f in man1 or f in man2
2250 2252 else:
2251 2253 managing = lambda f: f in man1
2252 2254
2253 2255 modified, added, removed = [], [], []
2254 2256 for f in self._files:
2255 2257 if not managing(f):
2256 2258 added.append(f)
2257 2259 elif self[f]:
2258 2260 modified.append(f)
2259 2261 else:
2260 2262 removed.append(f)
2261 2263
2262 2264 return scmutil.status(modified, added, removed, [], [], [], [])
2263 2265
2264 2266 class memfilectx(committablefilectx):
2265 2267 """memfilectx represents an in-memory file to commit.
2266 2268
2267 2269 See memctx and committablefilectx for more details.
2268 2270 """
2269 2271 def __init__(self, repo, changectx, path, data, islink=False,
2270 2272 isexec=False, copied=None):
2271 2273 """
2272 2274 path is the normalized file path relative to repository root.
2273 2275 data is the file content as a string.
2274 2276 islink is True if the file is a symbolic link.
2275 2277 isexec is True if the file is executable.
2276 2278 copied is the source file path if current file was copied in the
2277 2279 revision being committed, or None."""
2278 2280 super(memfilectx, self).__init__(repo, path, None, changectx)
2279 2281 self._data = data
2280 2282 if islink:
2281 2283 self._flags = 'l'
2282 2284 elif isexec:
2283 2285 self._flags = 'x'
2284 2286 else:
2285 2287 self._flags = ''
2286 2288 self._copied = None
2287 2289 if copied:
2288 2290 self._copied = (copied, nullid)
2289 2291
2290 2292 def data(self):
2291 2293 return self._data
2292 2294
2293 2295 def remove(self, ignoremissing=False):
2294 2296 """wraps unlink for a repo's working directory"""
2295 2297 # need to figure out what to do here
2296 2298 del self._changectx[self._path]
2297 2299
2298 2300 def write(self, data, flags, **kwargs):
2299 2301 """wraps repo.wwrite"""
2300 2302 self._data = data
2301 2303
2302 2304
2303 2305 class metadataonlyctx(committablectx):
2304 2306 """Like memctx but it's reusing the manifest of different commit.
2305 2307 Intended to be used by lightweight operations that are creating
2306 2308 metadata-only changes.
2307 2309
2308 2310 Revision information is supplied at initialization time. 'repo' is the
2309 2311 current localrepo, 'ctx' is the original revision whose manifest we're
2310 2312 reusing, 'parents' is a sequence of two parent revision identifiers
2311 2313 (pass None for every missing parent), and 'text' is the commit message.
2312 2314
2313 2315 user receives the committer name and defaults to current repository
2314 2316 username, date is the commit date in any format supported by
2315 2317 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2316 2318 metadata or is left empty.
2317 2319 """
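A hedged usage sketch (function name hypothetical): rewriting only the description of an existing changeset while reusing its manifest:

def reword(repo, ctx, newtext):
    mdctx = metadataonlyctx(repo, ctx, text=newtext)
    return mdctx.commit()    # commits via repo.commitctx()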
2318 2320 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2319 2321 date=None, extra=None, editor=False):
2320 2322 if text is None:
2321 2323 text = originalctx.description()
2322 2324 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2323 2325 self._rev = None
2324 2326 self._node = None
2325 2327 self._originalctx = originalctx
2326 2328 self._manifestnode = originalctx.manifestnode()
2327 2329 if parents is None:
2328 2330 parents = originalctx.parents()
2329 2331 else:
2330 2332 parents = [repo[p] for p in parents if p is not None]
2331 2333 parents = parents[:]
2332 2334 while len(parents) < 2:
2333 2335 parents.append(repo[nullid])
2334 2336 p1, p2 = self._parents = parents
2335 2337
2336 2338 # sanity check to ensure that the reused manifest parents are
2337 2339 # manifests of our commit parents
2338 2340 mp1, mp2 = self.manifestctx().parents
2339 2341 if p1.node() != nullid and p1.manifestnode() != mp1:
2340 2342 raise RuntimeError('can\'t reuse the manifest: '
2341 2343 'its p1 doesn\'t match the new ctx p1')
2342 2344 if p2.node() != nullid and p2.manifestnode() != mp2:
2343 2345 raise RuntimeError('can\'t reuse the manifest: '
2344 2346 'its p2 doesn\'t match the new ctx p2')
2345 2347
2346 2348 self._files = originalctx.files()
2347 2349 self.substate = {}
2348 2350
2349 2351 if editor:
2350 2352 self._text = editor(self._repo, self, [])
2351 2353 self._repo.savecommitmessage(self._text)
2352 2354
2353 2355 def manifestnode(self):
2354 2356 return self._manifestnode
2355 2357
2356 2358 @property
2357 2359 def _manifestctx(self):
2358 2360 return self._repo.manifestlog[self._manifestnode]
2359 2361
2360 2362 def filectx(self, path, filelog=None):
2361 2363 return self._originalctx.filectx(path, filelog=filelog)
2362 2364
2363 2365 def commit(self):
2364 2366 """commit context to the repo"""
2365 2367 return self._repo.commitctx(self)
2366 2368
2367 2369 @property
2368 2370 def _manifest(self):
2369 2371 return self._originalctx.manifest()
2370 2372
2371 2373 @propertycache
2372 2374 def _status(self):
2373 2375 """Calculate exact status from ``files`` specified in the ``origctx``
2374 2376 and parents manifests.
2375 2377 """
2376 2378 man1 = self.p1().manifest()
2377 2379 p2 = self._parents[1]
2378 2380 # "1 < len(self._parents)" can't be used for checking
2379 2381 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2380 2382 # explicitly initialized with a list whose length is always 2.
2381 2383 if p2.node() != nullid:
2382 2384 man2 = p2.manifest()
2383 2385 managing = lambda f: f in man1 or f in man2
2384 2386 else:
2385 2387 managing = lambda f: f in man1
2386 2388
2387 2389 modified, added, removed = [], [], []
2388 2390 for f in self._files:
2389 2391 if not managing(f):
2390 2392 added.append(f)
2391 2393 elif f in self:
2392 2394 modified.append(f)
2393 2395 else:
2394 2396 removed.append(f)
2395 2397
2396 2398 return scmutil.status(modified, added, removed, [], [], [], [])
2397 2399
2398 2400 class arbitraryfilectx(object):
2399 2401 """Allows you to use filectx-like functions on a file in an arbitrary
2400 2402 location on disk, possibly not in the working directory.
2401 2403 """
2402 2404 def __init__(self, path, repo=None):
2403 2405 # Repo is optional because contrib/simplemerge uses this class.
2404 2406 self._repo = repo
2405 2407 self._path = path
2406 2408
2407 2409 def cmp(self, fctx):
2408 2410 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2409 2411 # path if either side is a symlink.
2410 2412 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2411 2413 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2412 2414 # Add a fast-path for merge if both sides are disk-backed.
2413 2415 # Note that filecmp uses the opposite return values (True if same)
2414 2416 # from our cmp functions (True if different).
2415 2417 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2416 2418 return self.data() != fctx.data()
2417 2419
2418 2420 def path(self):
2419 2421 return self._path
2420 2422
2421 2423 def flags(self):
2422 2424 return ''
2423 2425
2424 2426 def data(self):
2425 2427 return util.readfile(self._path)
2426 2428
2427 2429 def decodeddata(self):
2428 2430 with open(self._path, "rb") as f:
2429 2431 return f.read()
2430 2432
2431 2433 def remove(self):
2432 2434 util.unlink(self._path)
2433 2435
2434 2436 def write(self, data, flags, **kwargs):
2435 2437 assert not flags
2436 2438 with open(self._path, "wb") as f:
2437 2439 f.write(data)
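A hedged sketch of the intended use, e.g. from contrib/simplemerge (paths hypothetical):

base = arbitraryfilectx('/tmp/base.txt')
other = arbitraryfilectx('/tmp/other.txt')
if base.cmp(other):    # True means the contents differ
    merged = other.data()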
@@ -1,3001 +1,3000 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 )
26 26 from . import (
27 27 bookmarks,
28 28 branchmap,
29 29 bundle2,
30 30 changegroup,
31 31 changelog,
32 32 color,
33 33 context,
34 34 dirstate,
35 35 dirstateguard,
36 36 discovery,
37 37 encoding,
38 38 error,
39 39 exchange,
40 40 extensions,
41 41 filelog,
42 42 hook,
43 43 lock as lockmod,
44 44 manifest,
45 45 match as matchmod,
46 46 merge as mergemod,
47 47 mergeutil,
48 48 namespaces,
49 49 narrowspec,
50 50 obsolete,
51 51 pathutil,
52 52 phases,
53 53 pushkey,
54 54 pycompat,
55 55 repository,
56 56 repoview,
57 57 revset,
58 58 revsetlang,
59 59 scmutil,
60 60 sparse,
61 61 store as storemod,
62 62 subrepoutil,
63 63 tags as tagsmod,
64 64 transaction,
65 65 txnutil,
66 66 util,
67 67 vfs as vfsmod,
68 68 )
69 69 from .utils import (
70 70 interfaceutil,
71 71 procutil,
72 72 stringutil,
73 73 )
74 74
75 75 from .revlogutils import (
76 76 constants as revlogconst,
77 77 )
78 78
79 79 release = lockmod.release
80 80 urlerr = util.urlerr
81 81 urlreq = util.urlreq
82 82
83 83 # set of (path, vfs-location) tuples. vfs-location is:
84 84 # - 'plain' for vfs relative paths
85 85 # - '' for svfs relative paths
86 86 _cachedfiles = set()
87 87
88 88 class _basefilecache(scmutil.filecache):
89 89 """All filecache usage on repo are done for logic that should be unfiltered
90 90 """
91 91 def __get__(self, repo, type=None):
92 92 if repo is None:
93 93 return self
94 94 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
95 95 def __set__(self, repo, value):
96 96 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
97 97 def __delete__(self, repo):
98 98 return super(_basefilecache, self).__delete__(repo.unfiltered())
99 99
100 100 class repofilecache(_basefilecache):
101 101 """filecache for files in .hg but outside of .hg/store"""
102 102 def __init__(self, *paths):
103 103 super(repofilecache, self).__init__(*paths)
104 104 for path in paths:
105 105 _cachedfiles.add((path, 'plain'))
106 106
107 107 def join(self, obj, fname):
108 108 return obj.vfs.join(fname)
109 109
110 110 class storecache(_basefilecache):
111 111 """filecache for files in the store"""
112 112 def __init__(self, *paths):
113 113 super(storecache, self).__init__(*paths)
114 114 for path in paths:
115 115 _cachedfiles.add((path, ''))
116 116
117 117 def join(self, obj, fname):
118 118 return obj.sjoin(fname)
119 119
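A hedged sketch of how these decorators are typically applied inside localrepository (property and file names hypothetical): the cached value is invalidated whenever the backing file changes on disk.

class examplerepo(object):
    @repofilecache('example')
    def example(self):
        # recomputed only when .hg/example changes on disk
        return self.vfs.read('example')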
120 120 def isfilecached(repo, name):
121 121 """check if a repo has already cached "name" filecache-ed property
122 122
123 123 This returns (cachedobj-or-None, iscached) tuple.
124 124 """
125 125 cacheentry = repo.unfiltered()._filecache.get(name, None)
126 126 if not cacheentry:
127 127 return None, False
128 128 return cacheentry.obj, True
129 129
130 130 class unfilteredpropertycache(util.propertycache):
131 131 """propertycache that apply to unfiltered repo only"""
132 132
133 133 def __get__(self, repo, type=None):
134 134 unfi = repo.unfiltered()
135 135 if unfi is repo:
136 136 return super(unfilteredpropertycache, self).__get__(unfi)
137 137 return getattr(unfi, self.name)
138 138
139 139 class filteredpropertycache(util.propertycache):
140 140 """propertycache that must take filtering in account"""
141 141
142 142 def cachevalue(self, obj, value):
143 143 object.__setattr__(obj, self.name, value)
144 144
145 145
146 146 def hasunfilteredcache(repo, name):
147 147 """check if a repo has an unfilteredpropertycache value for <name>"""
148 148 return name in vars(repo.unfiltered())
149 149
150 150 def unfilteredmethod(orig):
151 151 """decorate method that always need to be run on unfiltered version"""
152 152 def wrapper(repo, *args, **kwargs):
153 153 return orig(repo.unfiltered(), *args, **kwargs)
154 154 return wrapper
155 155
156 156 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
157 157 'unbundle'}
158 158 legacycaps = moderncaps.union({'changegroupsubset'})
159 159
160 160 @interfaceutil.implementer(repository.ipeercommandexecutor)
161 161 class localcommandexecutor(object):
162 162 def __init__(self, peer):
163 163 self._peer = peer
164 164 self._sent = False
165 165 self._closed = False
166 166
167 167 def __enter__(self):
168 168 return self
169 169
170 170 def __exit__(self, exctype, excvalue, exctb):
171 171 self.close()
172 172
173 173 def callcommand(self, command, args):
174 174 if self._sent:
175 175 raise error.ProgrammingError('callcommand() cannot be used after '
176 176 'sendcommands()')
177 177
178 178 if self._closed:
179 179 raise error.ProgrammingError('callcommand() cannot be used after '
180 180 'close()')
181 181
182 182 # We don't need to support anything fancy. Just call the named
183 183 # method on the peer and return a resolved future.
184 184 fn = getattr(self._peer, pycompat.sysstr(command))
185 185
186 186 f = pycompat.futures.Future()
187 187
188 188 try:
189 189 result = fn(**pycompat.strkwargs(args))
190 190 except Exception:
191 191 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
192 192 else:
193 193 f.set_result(result)
194 194
195 195 return f
196 196
197 197 def sendcommands(self):
198 198 self._sent = True
199 199
200 200 def close(self):
201 201 self._closed = True
202 202
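A hedged usage sketch of the executor protocol above: commands are queued with callcommand() and, for a local peer, come back as already-resolved futures:

def lookuptip(peer):
    with peer.commandexecutor() as e:
        f = e.callcommand('lookup', {'key': 'tip'})
    return f.result()    # node of the tip changeset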
203 203 @interfaceutil.implementer(repository.ipeercommands)
204 204 class localpeer(repository.peer):
205 205 '''peer for a local repo; reflects only the most recent API'''
206 206
207 207 def __init__(self, repo, caps=None):
208 208 super(localpeer, self).__init__()
209 209
210 210 if caps is None:
211 211 caps = moderncaps.copy()
212 212 self._repo = repo.filtered('served')
213 213 self.ui = repo.ui
214 214 self._caps = repo._restrictcapabilities(caps)
215 215
216 216 # Begin of _basepeer interface.
217 217
218 218 def url(self):
219 219 return self._repo.url()
220 220
221 221 def local(self):
222 222 return self._repo
223 223
224 224 def peer(self):
225 225 return self
226 226
227 227 def canpush(self):
228 228 return True
229 229
230 230 def close(self):
231 231 self._repo.close()
232 232
233 233 # End of _basepeer interface.
234 234
235 235 # Begin of _basewirecommands interface.
236 236
237 237 def branchmap(self):
238 238 return self._repo.branchmap()
239 239
240 240 def capabilities(self):
241 241 return self._caps
242 242
243 243 def clonebundles(self):
244 244 return self._repo.tryread('clonebundles.manifest')
245 245
246 246 def debugwireargs(self, one, two, three=None, four=None, five=None):
247 247 """Used to test argument passing over the wire"""
248 248 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
249 249 pycompat.bytestr(four),
250 250 pycompat.bytestr(five))
251 251
252 252 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
253 253 **kwargs):
254 254 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
255 255 common=common, bundlecaps=bundlecaps,
256 256 **kwargs)[1]
257 257 cb = util.chunkbuffer(chunks)
258 258
259 259 if exchange.bundle2requested(bundlecaps):
260 260 # When requesting a bundle2, getbundle returns a stream to make the
261 261 # wire level function happier. We need to build a proper object
262 262 # from it in local peer.
263 263 return bundle2.getunbundler(self.ui, cb)
264 264 else:
265 265 return changegroup.getunbundler('01', cb, None)
266 266
267 267 def heads(self):
268 268 return self._repo.heads()
269 269
270 270 def known(self, nodes):
271 271 return self._repo.known(nodes)
272 272
273 273 def listkeys(self, namespace):
274 274 return self._repo.listkeys(namespace)
275 275
276 276 def lookup(self, key):
277 277 return self._repo.lookup(key)
278 278
279 279 def pushkey(self, namespace, key, old, new):
280 280 return self._repo.pushkey(namespace, key, old, new)
281 281
282 282 def stream_out(self):
283 283 raise error.Abort(_('cannot perform stream clone against local '
284 284 'peer'))
285 285
286 286 def unbundle(self, bundle, heads, url):
287 287 """apply a bundle on a repo
288 288
289 289 This function handles the repo locking itself."""
290 290 try:
291 291 try:
292 292 bundle = exchange.readbundle(self.ui, bundle, None)
293 293 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
294 294 if util.safehasattr(ret, 'getchunks'):
295 295 # This is a bundle20 object, turn it into an unbundler.
296 296 # This little dance should be dropped eventually when the
297 297 # API is finally improved.
298 298 stream = util.chunkbuffer(ret.getchunks())
299 299 ret = bundle2.getunbundler(self.ui, stream)
300 300 return ret
301 301 except Exception as exc:
302 302 # If the exception contains output salvaged from a bundle2
303 303 # reply, we need to make sure it is printed before continuing
304 304 # to fail. So we build a bundle2 with such output and consume
305 305 # it directly.
306 306 #
307 307 # This is not very elegant but allows a "simple" solution for
308 308 # issue4594
309 309 output = getattr(exc, '_bundle2salvagedoutput', ())
310 310 if output:
311 311 bundler = bundle2.bundle20(self._repo.ui)
312 312 for out in output:
313 313 bundler.addpart(out)
314 314 stream = util.chunkbuffer(bundler.getchunks())
315 315 b = bundle2.getunbundler(self.ui, stream)
316 316 bundle2.processbundle(self._repo, b)
317 317 raise
318 318 except error.PushRaced as exc:
319 319 raise error.ResponseError(_('push failed:'),
320 320 stringutil.forcebytestr(exc))
321 321
322 322 # End of _basewirecommands interface.
323 323
324 324 # Begin of peer interface.
325 325
326 326 def commandexecutor(self):
327 327 return localcommandexecutor(self)
328 328
329 329 # End of peer interface.
330 330
331 331 @interfaceutil.implementer(repository.ipeerlegacycommands)
332 332 class locallegacypeer(localpeer):
333 333 '''peer extension which implements legacy methods too; used for tests with
334 334 restricted capabilities'''
335 335
336 336 def __init__(self, repo):
337 337 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
338 338
339 339 # Begin of baselegacywirecommands interface.
340 340
341 341 def between(self, pairs):
342 342 return self._repo.between(pairs)
343 343
344 344 def branches(self, nodes):
345 345 return self._repo.branches(nodes)
346 346
347 347 def changegroup(self, nodes, source):
348 348 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
349 349 missingheads=self._repo.heads())
350 350 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
351 351
352 352 def changegroupsubset(self, bases, heads, source):
353 353 outgoing = discovery.outgoing(self._repo, missingroots=bases,
354 354 missingheads=heads)
355 355 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
356 356
357 357 # End of baselegacywirecommands interface.
358 358
359 359 # Increment the sub-version when the revlog v2 format changes to lock out old
360 360 # clients.
361 361 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
362 362
363 363 # A repository with the sparserevlog feature will have delta chains that
364 364 # can spread over a larger span. Sparse reading cuts these large spans into
365 365 # pieces, so that each piece isn't too big.
366 366 # Without the sparserevlog capability, reading from the repository could use
367 367 # huge amounts of memory, because the whole span would be read at once,
368 368 # including all the intermediate revisions that aren't pertinent for the chain.
369 369 # This is why once a repository has enabled sparse-read, it becomes required.
370 370 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
371 371
372 372 # Functions receiving (ui, features) that extensions can register to impact
373 373 # the ability to load repositories with custom requirements. Only
374 374 # functions defined in loaded extensions are called.
375 375 #
376 376 # The function receives a set of requirement strings that the repository
377 377 # is capable of opening. Functions will typically add elements to the
378 378 # set to reflect that the extension knows how to handle those requirements.
379 379 featuresetupfuncs = set()
380 380
381 381 def makelocalrepository(baseui, path, intents=None):
382 382 """Create a local repository object.
383 383
384 384 Given arguments needed to construct a local repository, this function
385 385 performs various early repository loading functionality (such as
386 386 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
387 387 the repository can be opened, derives a type suitable for representing
388 388 that repository, and returns an instance of it.
389 389
390 390 The returned object conforms to the ``repository.completelocalrepository``
391 391 interface.
392 392
393 393 The repository type is derived by calling a series of factory functions
394 394 for each aspect/interface of the final repository. These are defined by
395 395 ``REPO_INTERFACES``.
396 396
397 397 Each factory function is called to produce a type implementing a specific
398 398 interface. The cumulative list of returned types will be combined into a
399 399 new type and that type will be instantiated to represent the local
400 400 repository.
401 401
402 402 The factory functions each receive various state that may be consulted
403 403 as part of deriving a type.
404 404
405 405 Extensions should wrap these factory functions to customize repository type
406 406 creation. Note that an extension's wrapped function may be called even if
407 407 that extension is not loaded for the repo being constructed. Extensions
408 408 should check if their ``__name__`` appears in the
409 409 ``extensionmodulenames`` set passed to the factory function and no-op if
410 410 not.
411 411 """
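The type-composition step can be pictured in isolation (all names hypothetical): each factory contributes one base class and the final repository type derives from all of them.

class storagepart(object):
    pass

class wireprotopart(object):
    pass

derived = type('derivedrepo', (storagepart, wireprotopart), {})
repo_like = derived()    # instance combining every contributed aspect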
412 412 ui = baseui.copy()
413 413 # Prevent copying repo configuration.
414 414 ui.copy = baseui.copy
415 415
416 416 # Working directory VFS rooted at repository root.
417 417 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
418 418
419 419 # Main VFS for .hg/ directory.
420 420 hgpath = wdirvfs.join(b'.hg')
421 421 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
422 422
423 423 # The .hg/ path should exist and should be a directory. All other
424 424 # cases are errors.
425 425 if not hgvfs.isdir():
426 426 try:
427 427 hgvfs.stat()
428 428 except OSError as e:
429 429 if e.errno != errno.ENOENT:
430 430 raise
431 431
432 432 raise error.RepoError(_(b'repository %s not found') % path)
433 433
434 434 # .hg/requires file contains a newline-delimited list of
435 435 # features/capabilities the opener (us) must have in order to use
436 436 # the repository. This file was introduced in Mercurial 0.9.2,
437 437 # which means very old repositories may not have one. We assume
438 438 # a missing file translates to no requirements.
439 439 try:
440 440 requirements = set(hgvfs.read(b'requires').splitlines())
441 441 except IOError as e:
442 442 if e.errno != errno.ENOENT:
443 443 raise
444 444 requirements = set()
445 445
446 446 # The .hg/hgrc file may load extensions or contain config options
447 447 # that influence repository construction. Attempt to load it and
448 448 # process any new extensions that it may have pulled in.
449 449 try:
450 450 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
451 451 # Run this before extensions.loadall() so extensions can be
452 452 # automatically enabled.
453 453 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
454 454 except IOError:
455 455 pass
456 456 else:
457 457 extensions.loadall(ui)
458 458
459 459 # Set of module names of extensions loaded for this repository.
460 460 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
461 461
462 462 supportedrequirements = gathersupportedrequirements(ui)
463 463
464 464 # We first validate the requirements are known.
465 465 ensurerequirementsrecognized(requirements, supportedrequirements)
466 466
467 467 # Then we validate that the known set is reasonable to use together.
468 468 ensurerequirementscompatible(ui, requirements)
469 469
470 470 # TODO there are unhandled edge cases related to opening repositories with
471 471 # shared storage. If storage is shared, we should also test for requirements
472 472 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
473 473 # that repo, as that repo may load extensions needed to open it. This is a
474 474 # bit complicated because we don't want the other hgrc to overwrite settings
475 475 # in this hgrc.
476 476 #
477 477 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
478 478 # file when sharing repos. But if a requirement is added after the share is
479 479 # performed, thereby introducing a new requirement for the opener, we may
480 480 # not see that and could encounter a run-time error interacting with
481 481 # that shared store since it has an unknown-to-us requirement.
482 482
483 483 # At this point, we know we should be capable of opening the repository.
484 484 # Now get on with doing that.
485 485
486 486 features = set()
487 487
488 488 # The "store" part of the repository holds versioned data. How it is
489 489 # accessed is determined by various requirements. The ``shared`` or
490 490 # ``relshared`` requirements indicate the store lives in the path contained
491 491 # in the ``.hg/sharedpath`` file. This is an absolute path for
492 492 # ``shared`` and relative to ``.hg/`` for ``relshared``.
493 493 if b'shared' in requirements or b'relshared' in requirements:
494 494 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
495 495 if b'relshared' in requirements:
496 496 sharedpath = hgvfs.join(sharedpath)
497 497
498 498 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
499 499
500 500 if not sharedvfs.exists():
501 501 raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
502 502 b'directory %s') % sharedvfs.base)
503 503
504 504 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
505 505
506 506 storebasepath = sharedvfs.base
507 507 cachepath = sharedvfs.join(b'cache')
508 508 else:
509 509 storebasepath = hgvfs.base
510 510 cachepath = hgvfs.join(b'cache')
511 511
512 512 # The store has changed over time and the exact layout is dictated by
513 513 # requirements. The store interface abstracts differences across all
514 514 # of them.
515 515 store = makestore(requirements, storebasepath,
516 516 lambda base: vfsmod.vfs(base, cacheaudited=True))
517 517 hgvfs.createmode = store.createmode
518 518
519 519 storevfs = store.vfs
520 520 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
521 521
522 522 # The cache vfs is used to manage cache files.
523 523 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
524 524 cachevfs.createmode = store.createmode
525 525
526 526 # Now resolve the type for the repository object. We do this by repeatedly
527 527 # calling a factory function to produce types for specific aspects of the
528 528 # repo's operation. The aggregate returned types are used as base classes
529 529 # for a dynamically-derived type, which will represent our new repository.
530 530
531 531 bases = []
532 532 extrastate = {}
533 533
534 534 for iface, fn in REPO_INTERFACES:
535 535 # We pass all potentially useful state to give extensions tons of
536 536 # flexibility.
537 537 typ = fn(ui=ui,
538 538 intents=intents,
539 539 requirements=requirements,
540 540 features=features,
541 541 wdirvfs=wdirvfs,
542 542 hgvfs=hgvfs,
543 543 store=store,
544 544 storevfs=storevfs,
545 545 storeoptions=storevfs.options,
546 546 cachevfs=cachevfs,
547 547 extensionmodulenames=extensionmodulenames,
548 548 extrastate=extrastate,
549 549 baseclasses=bases)
550 550
551 551 if not isinstance(typ, type):
552 552 raise error.ProgrammingError('unable to construct type for %s' %
553 553 iface)
554 554
555 555 bases.append(typ)
556 556
557 557 # type() allows you to use characters in type names that wouldn't be
558 558 # recognized as Python symbols in source code. We abuse that to add
559 559 # rich information about our constructed repo.
560 560 name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
561 561 wdirvfs.base,
562 562 b','.join(sorted(requirements))))
563 563
564 564 cls = type(name, tuple(bases), {})
565 565
566 566 return cls(
567 567 baseui=baseui,
568 568 ui=ui,
569 569 origroot=path,
570 570 wdirvfs=wdirvfs,
571 571 hgvfs=hgvfs,
572 572 requirements=requirements,
573 573 supportedrequirements=supportedrequirements,
574 574 sharedpath=storebasepath,
575 575 store=store,
576 576 cachevfs=cachevfs,
577 577 features=features,
578 578 intents=intents)
579 579
580 580 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
581 581 """Perform additional actions after .hg/hgrc is loaded.
582 582
583 583 This function is called during repository loading immediately after
584 584 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
585 585
586 586 The function can be used to validate configs, automatically add
587 587 options (including extensions) based on requirements, etc.
588 588 """
589 589
590 590 # Map of requirements to list of extensions to load automatically when
591 591 # requirement is present.
592 592 autoextensions = {
593 593 b'largefiles': [b'largefiles'],
594 594 b'lfs': [b'lfs'],
595 595 }
596 596
597 597 for requirement, names in sorted(autoextensions.items()):
598 598 if requirement not in requirements:
599 599 continue
600 600
601 601 for name in names:
602 602 if not ui.hasconfig(b'extensions', name):
603 603 ui.setconfig(b'extensions', name, b'', source='autoload')
604 604
605 605 def gathersupportedrequirements(ui):
606 606 """Determine the complete set of recognized requirements."""
607 607 # Start with all requirements supported by this file.
608 608 supported = set(localrepository._basesupported)
609 609
610 610 # Execute ``featuresetupfuncs`` entries if they belong to an extension
611 611 # relevant to this ui instance.
612 612 modules = {m.__name__ for n, m in extensions.extensions(ui)}
613 613
614 614 for fn in featuresetupfuncs:
615 615 if fn.__module__ in modules:
616 616 fn(ui, supported)
617 617
618 618 # Add derived requirements from registered compression engines.
619 619 for name in util.compengines:
620 620 engine = util.compengines[name]
621 621 if engine.revlogheader():
622 622 supported.add(b'exp-compression-%s' % name)
623 623
624 624 return supported
625 625
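A hedged sketch of the extension side of this hook (requirement string hypothetical); the callback must live in a loaded extension module for it to fire, per the module check above:

from mercurial import localrepo

def featuresetup(ui, supported):
    supported.add(b'exp-myfeature')    # hypothetical requirement name

def uisetup(ui):
    localrepo.featuresetupfuncs.add(featuresetup)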
626 626 def ensurerequirementsrecognized(requirements, supported):
627 627 """Validate that a set of local requirements is recognized.
628 628
629 629 Receives a set of requirements. Raises an ``error.RepoError`` if there
630 630 exists any requirement in that set that currently loaded code doesn't
631 631 recognize.
632 632
633 633 Returns a set of supported requirements.
634 634 """
635 635 missing = set()
636 636
637 637 for requirement in requirements:
638 638 if requirement in supported:
639 639 continue
640 640
641 641 if not requirement or not requirement[0:1].isalnum():
642 642 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
643 643
644 644 missing.add(requirement)
645 645
646 646 if missing:
647 647 raise error.RequirementError(
648 648 _(b'repository requires features unknown to this Mercurial: %s') %
649 649 b' '.join(sorted(missing)),
650 650 hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
651 651 b'for more information'))
652 652
653 653 def ensurerequirementscompatible(ui, requirements):
654 654 """Validates that a set of recognized requirements is mutually compatible.
655 655
656 656 Some requirements may not be compatible with others or require
657 657 config options that aren't enabled. This function is called during
658 658 repository opening to ensure that the set of requirements needed
659 659 to open a repository is sane and compatible with config options.
660 660
661 661 Extensions can monkeypatch this function to perform additional
662 662 checking.
663 663
664 664 ``error.RepoError`` should be raised on failure.
665 665 """
666 666 if b'exp-sparse' in requirements and not sparse.enabled:
667 667 raise error.RepoError(_(b'repository is using sparse feature but '
668 668 b'sparse is not enabled; enable the '
669 669 b'"sparse" extensions to access'))
670 670
671 671 def makestore(requirements, path, vfstype):
672 672 """Construct a storage object for a repository."""
673 673 if b'store' in requirements:
674 674 if b'fncache' in requirements:
675 675 return storemod.fncachestore(path, vfstype,
676 676 b'dotencode' in requirements)
677 677
678 678 return storemod.encodedstore(path, vfstype)
679 679
680 680 return storemod.basicstore(path, vfstype)
681 681
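# Summary of the dispatch above (a reading aid only):
#
#   requirements contain       -> store type returned
#   'store' and 'fncache'      -> storemod.fncachestore
#   'store' without 'fncache'  -> storemod.encodedstore
#   no 'store' (very old repo) -> storemod.basicstore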
682 682 def resolvestorevfsoptions(ui, requirements, features):
683 683 """Resolve the options to pass to the store vfs opener.
684 684
685 685 The returned dict is used to influence behavior of the storage layer.
686 686 """
687 687 options = {}
688 688
689 689 if b'treemanifest' in requirements:
690 690 options[b'treemanifest'] = True
691 691
692 692 # experimental config: format.manifestcachesize
693 693 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
694 694 if manifestcachesize is not None:
695 695 options[b'manifestcachesize'] = manifestcachesize
696 696
697 697 # In the absence of another requirement superseding a revlog-related
698 698 # requirement, we have to assume the repo is using revlog version 0.
699 699 # This revlog format is super old and we don't bother trying to parse
700 700 # opener options for it because those options wouldn't do anything
701 701 # meaningful on such old repos.
702 702 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
703 703 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
704 704
705 705 return options
706 706
707 707 def resolverevlogstorevfsoptions(ui, requirements, features):
708 708 """Resolve opener options specific to revlogs."""
709 709
710 710 options = {}
711 711
712 712 if b'revlogv1' in requirements:
713 713 options[b'revlogv1'] = True
714 714 if REVLOGV2_REQUIREMENT in requirements:
715 715 options[b'revlogv2'] = True
716 716
717 717 if b'generaldelta' in requirements:
718 718 options[b'generaldelta'] = True
719 719
720 720 # experimental config: format.chunkcachesize
721 721 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
722 722 if chunkcachesize is not None:
723 723 options[b'chunkcachesize'] = chunkcachesize
724 724
725 725 deltabothparents = ui.configbool(b'storage',
726 726 b'revlog.optimize-delta-parent-choice')
727 727 options[b'deltabothparents'] = deltabothparents
728 728
729 729 options[b'lazydeltabase'] = not scmutil.gddeltaconfig(ui)
730 730
731 731 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
732 732 if 0 <= chainspan:
733 733 options[b'maxdeltachainspan'] = chainspan
734 734
735 735 mmapindexthreshold = ui.configbytes(b'experimental',
736 736 b'mmapindexthreshold')
737 737 if mmapindexthreshold is not None:
738 738 options[b'mmapindexthreshold'] = mmapindexthreshold
739 739
740 740 withsparseread = ui.configbool(b'experimental', b'sparse-read')
741 741 srdensitythres = float(ui.config(b'experimental',
742 742 b'sparse-read.density-threshold'))
743 743 srmingapsize = ui.configbytes(b'experimental',
744 744 b'sparse-read.min-gap-size')
745 745 options[b'with-sparse-read'] = withsparseread
746 746 options[b'sparse-read-density-threshold'] = srdensitythres
747 747 options[b'sparse-read-min-gap-size'] = srmingapsize
748 748
749 749 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
750 750 options[b'sparse-revlog'] = sparserevlog
751 751 if sparserevlog:
752 752 options[b'generaldelta'] = True
753 753
754 754 maxchainlen = None
755 755 if sparserevlog:
756 756 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
757 757 # experimental config: format.maxchainlen
758 758 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
759 759 if maxchainlen is not None:
760 760 options[b'maxchainlen'] = maxchainlen
761 761
762 762 for r in requirements:
763 763 if r.startswith(b'exp-compression-'):
764 764 options[b'compengine'] = r[len(b'exp-compression-'):]
765 765
766 766 if repository.NARROW_REQUIREMENT in requirements:
767 767 options[b'enableellipsis'] = True
768 768
769 769 return options
770 770
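# Hedged example of how user configuration surfaces in the options
# resolved above; an hgrc containing
#
#   [format]
#   maxchainlen = 1000
#   [storage]
#   revlog.optimize-delta-parent-choice = true
#
# would yield options[b'maxchainlen'] = 1000 and
# options[b'deltabothparents'] = True for the store vfs opener.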
771 771 def makemain(**kwargs):
772 772 """Produce a type conforming to ``ilocalrepositorymain``."""
773 773 return localrepository
774 774
775 775 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
776 776 class revlogfilestorage(object):
777 777 """File storage when using revlogs."""
778 778
779 779 def file(self, path):
780 780 if path[0] == b'/':
781 781 path = path[1:]
782 782
783 783 return filelog.filelog(self.svfs, path)
784 784
785 785 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
786 786 class revlognarrowfilestorage(object):
787 787 """File storage when using revlogs and narrow files."""
788 788
789 789 def file(self, path):
790 790 if path[0] == b'/':
791 791 path = path[1:]
792 792
793 793 return filelog.narrowfilelog(self.svfs, path, self.narrowmatch())
794 794
795 795 def makefilestorage(requirements, features, **kwargs):
796 796 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
797 797 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
798 798
799 799 if repository.NARROW_REQUIREMENT in requirements:
800 800 return revlognarrowfilestorage
801 801 else:
802 802 return revlogfilestorage
803 803
804 804 # List of repository interfaces and factory functions for them. Each
805 805 # will be called in order during ``makelocalrepository()`` to iteratively
806 806 # derive the final type for a local repository instance.
807 807 REPO_INTERFACES = [
808 808 (repository.ilocalrepositorymain, makemain),
809 809 (repository.ilocalrepositoryfilestorage, makefilestorage),
810 810 ]
811 811
812 812 @interfaceutil.implementer(repository.ilocalrepositorymain)
813 813 class localrepository(object):
814 814 """Main class for representing local repositories.
815 815
816 816 All local repositories are instances of this class.
817 817
818 818 Constructed on its own, instances of this class are not usable as
819 819 repository objects. To obtain a usable repository object, call
820 820 ``hg.repository()``, ``localrepo.instance()``, or
821 821 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
822 822 ``instance()`` adds support for creating new repositories.
823 823 ``hg.repository()`` adds more extension integration, including calling
824 824 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
825 825 used.
826 826 """
827 827
828 828 # obsolete experimental requirements:
829 829 # - manifestv2: An experimental new manifest format that allowed
830 830 # for stem compression of long paths. Experiment ended up not
831 831 # being successful (repository sizes went up due to worse delta
832 832 # chains), and the code was deleted in 4.6.
833 833 supportedformats = {
834 834 'revlogv1',
835 835 'generaldelta',
836 836 'treemanifest',
837 837 REVLOGV2_REQUIREMENT,
838 838 SPARSEREVLOG_REQUIREMENT,
839 839 }
840 840 _basesupported = supportedformats | {
841 841 'store',
842 842 'fncache',
843 843 'shared',
844 844 'relshared',
845 845 'dotencode',
846 846 'exp-sparse',
847 847 'internal-phase'
848 848 }
849 849
850 850 # list of prefixes for files which can be written without 'wlock'
851 851 # Extensions should extend this list when needed
852 852 _wlockfreeprefix = {
853 853 # We might consider requiring 'wlock' for the next
854 854 # two, but pretty much all the existing code assumes
855 855 # wlock is not needed so we keep them excluded for
856 856 # now.
857 857 'hgrc',
858 858 'requires',
859 859 # XXX cache is a complicated business; someone
860 860 # should investigate this in depth at some point
861 861 'cache/',
862 862 # XXX shouldn't be dirstate covered by the wlock?
863 863 'dirstate',
864 864 # XXX bisect was still a bit too messy at the time
865 865 # this changeset was introduced. Someone should fix
866 866 # the remaining bit and drop this line
867 867 'bisect.state',
868 868 }
869 869
870 870 def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
871 871 supportedrequirements, sharedpath, store, cachevfs,
872 872 features, intents=None):
873 873 """Create a new local repository instance.
874 874
875 875 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
876 876 or ``localrepo.makelocalrepository()`` for obtaining a new repository
877 877 object.
878 878
879 879 Arguments:
880 880
881 881 baseui
882 882 ``ui.ui`` instance that ``ui`` argument was based off of.
883 883
884 884 ui
885 885 ``ui.ui`` instance for use by the repository.
886 886
887 887 origroot
888 888 ``bytes`` path to working directory root of this repository.
889 889
890 890 wdirvfs
891 891 ``vfs.vfs`` rooted at the working directory.
892 892
893 893 hgvfs
894 894 ``vfs.vfs`` rooted at .hg/
895 895
896 896 requirements
897 897 ``set`` of bytestrings representing repository opening requirements.
898 898
899 899 supportedrequirements
900 900 ``set`` of bytestrings representing repository requirements that we
901 901 know how to open. May be a superset of ``requirements``.
902 902
903 903 sharedpath
904 904 ``bytes`` defining the path to the storage base directory. Points to a
905 905 ``.hg/`` directory somewhere.
906 906
907 907 store
908 908 ``store.basicstore`` (or derived) instance providing access to
909 909 versioned storage.
910 910
911 911 cachevfs
912 912 ``vfs.vfs`` used for cache files.
913 913
914 914 features
915 915 ``set`` of bytestrings defining features/capabilities of this
916 916 instance.
917 917
918 918 intents
919 919 ``set`` of system strings indicating what this repo will be used
920 920 for.
921 921 """
922 922 self.baseui = baseui
923 923 self.ui = ui
924 924 self.origroot = origroot
925 925 # vfs rooted at working directory.
926 926 self.wvfs = wdirvfs
927 927 self.root = wdirvfs.base
928 928 # vfs rooted at .hg/. Used to access most non-store paths.
929 929 self.vfs = hgvfs
930 930 self.path = hgvfs.base
931 931 self.requirements = requirements
932 932 self.supported = supportedrequirements
933 933 self.sharedpath = sharedpath
934 934 self.store = store
935 935 self.cachevfs = cachevfs
936 936 self.features = features
937 937
938 938 self.filtername = None
939 939
940 940 if (self.ui.configbool('devel', 'all-warnings') or
941 941 self.ui.configbool('devel', 'check-locks')):
942 942 self.vfs.audit = self._getvfsward(self.vfs.audit)
943 943 # A list of callbacks to shape the phase if no data were found.
944 944 # Callbacks are in the form: func(repo, roots) --> processed root.
945 945 # This list is to be filled by extensions during repo setup
946 946 self._phasedefaults = []
947 947
948 948 color.setup(self.ui)
949 949
950 950 self.spath = self.store.path
951 951 self.svfs = self.store.vfs
952 952 self.sjoin = self.store.join
953 953 if (self.ui.configbool('devel', 'all-warnings') or
954 954 self.ui.configbool('devel', 'check-locks')):
955 955 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
956 956 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
957 957 else: # standard vfs
958 958 self.svfs.audit = self._getsvfsward(self.svfs.audit)
959 959
960 960 self._dirstatevalidatewarned = False
961 961
962 962 self._branchcaches = {}
963 963 self._revbranchcache = None
964 964 self._filterpats = {}
965 965 self._datafilters = {}
966 966 self._transref = self._lockref = self._wlockref = None
967 967
968 968 # A cache for various files under .hg/ that tracks file changes
969 969 # (used by the filecache decorator)
970 970 #
971 971 # Maps a property name to its util.filecacheentry
972 972 self._filecache = {}
973 973
974 974 # hold sets of revisions to be filtered
975 975 # should be cleared when something might have changed the filter value:
976 976 # - new changesets,
977 977 # - phase change,
978 978 # - new obsolescence marker,
979 979 # - working directory parent change,
980 980 # - bookmark changes
981 981 self.filteredrevcache = {}
982 982
983 983 # post-dirstate-status hooks
984 984 self._postdsstatus = []
985 985
986 986 # generic mapping between names and nodes
987 987 self.names = namespaces.namespaces()
988 988
989 989 # Key to signature value.
990 990 self._sparsesignaturecache = {}
991 991 # Signature to cached matcher instance.
992 992 self._sparsematchercache = {}
993 993
994 994 def _getvfsward(self, origfunc):
995 995 """build a ward for self.vfs"""
996 996 rref = weakref.ref(self)
997 997 def checkvfs(path, mode=None):
998 998 ret = origfunc(path, mode=mode)
999 999 repo = rref()
1000 1000 if (repo is None
1001 1001 or not util.safehasattr(repo, '_wlockref')
1002 1002 or not util.safehasattr(repo, '_lockref')):
1003 1003 return
1004 1004 if mode in (None, 'r', 'rb'):
1005 1005 return
1006 1006 if path.startswith(repo.path):
1007 1007 # truncate name relative to the repository (.hg)
1008 1008 path = path[len(repo.path) + 1:]
1009 1009 if path.startswith('cache/'):
1010 1010 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
1011 1011 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
1012 1012 if path.startswith('journal.'):
1013 1013 # journal is covered by 'lock'
1014 1014 if repo._currentlock(repo._lockref) is None:
1015 1015 repo.ui.develwarn('write with no lock: "%s"' % path,
1016 1016 stacklevel=2, config='check-locks')
1017 1017 elif repo._currentlock(repo._wlockref) is None:
1018 1018 # rest of vfs files are covered by 'wlock'
1019 1019 #
1020 1020 # exclude special files
1021 1021 for prefix in self._wlockfreeprefix:
1022 1022 if path.startswith(prefix):
1023 1023 return
1024 1024 repo.ui.develwarn('write with no wlock: "%s"' % path,
1025 1025 stacklevel=2, config='check-locks')
1026 1026 return ret
1027 1027 return checkvfs
1028 1028
1029 1029 def _getsvfsward(self, origfunc):
1030 1030 """build a ward for self.svfs"""
1031 1031 rref = weakref.ref(self)
1032 1032 def checksvfs(path, mode=None):
1033 1033 ret = origfunc(path, mode=mode)
1034 1034 repo = rref()
1035 1035 if repo is None or not util.safehasattr(repo, '_lockref'):
1036 1036 return
1037 1037 if mode in (None, 'r', 'rb'):
1038 1038 return
1039 1039 if path.startswith(repo.sharedpath):
1040 1040 # truncate name relative to the repository (.hg)
1041 1041 path = path[len(repo.sharedpath) + 1:]
1042 1042 if repo._currentlock(repo._lockref) is None:
1043 1043 repo.ui.develwarn('write with no lock: "%s"' % path,
1044 1044 stacklevel=3)
1045 1045 return ret
1046 1046 return checksvfs
1047 1047
1048 1048 def close(self):
1049 1049 self._writecaches()
1050 1050
1051 1051 def _writecaches(self):
1052 1052 if self._revbranchcache:
1053 1053 self._revbranchcache.write()
1054 1054
1055 1055 def _restrictcapabilities(self, caps):
1056 1056 if self.ui.configbool('experimental', 'bundle2-advertise'):
1057 1057 caps = set(caps)
1058 1058 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
1059 1059 role='client'))
1060 1060 caps.add('bundle2=' + urlreq.quote(capsblob))
1061 1061 return caps
1062 1062
1063 1063 def _writerequirements(self):
1064 1064 scmutil.writerequires(self.vfs, self.requirements)
1065 1065
1066 1066 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
1067 1067 # self -> auditor -> self._checknested -> self
1068 1068
1069 1069 @property
1070 1070 def auditor(self):
1071 1071 # This is only used by context.workingctx.match in order to
1072 1072 # detect files in subrepos.
1073 1073 return pathutil.pathauditor(self.root, callback=self._checknested)
1074 1074
1075 1075 @property
1076 1076 def nofsauditor(self):
1077 1077 # This is only used by context.basectx.match in order to detect
1078 1078 # files in subrepos.
1079 1079 return pathutil.pathauditor(self.root, callback=self._checknested,
1080 1080 realfs=False, cached=True)
1081 1081
1082 1082 def _checknested(self, path):
1083 1083 """Determine if path is a legal nested repository."""
1084 1084 if not path.startswith(self.root):
1085 1085 return False
1086 1086 subpath = path[len(self.root) + 1:]
1087 1087 normsubpath = util.pconvert(subpath)
1088 1088
1089 1089 # XXX: Checking against the current working copy is wrong in
1090 1090 # the sense that it can reject things like
1091 1091 #
1092 1092 # $ hg cat -r 10 sub/x.txt
1093 1093 #
1094 1094 # if sub/ is no longer a subrepository in the working copy
1095 1095 # parent revision.
1096 1096 #
1097 1097 # However, it can of course also allow things that would have
1098 1098 # been rejected before, such as the above cat command if sub/
1099 1099 # is a subrepository now, but was a normal directory before.
1100 1100 # The old path auditor would have rejected by mistake since it
1101 1101 # panics when it sees sub/.hg/.
1102 1102 #
1103 1103 # All in all, checking against the working copy seems sensible
1104 1104 # since we want to prevent access to nested repositories on
1105 1105 # the filesystem *now*.
1106 1106 ctx = self[None]
1107 1107 parts = util.splitpath(subpath)
1108 1108 while parts:
1109 1109 prefix = '/'.join(parts)
1110 1110 if prefix in ctx.substate:
1111 1111 if prefix == normsubpath:
1112 1112 return True
1113 1113 else:
1114 1114 sub = ctx.sub(prefix)
1115 1115 return sub.checknested(subpath[len(prefix) + 1:])
1116 1116 else:
1117 1117 parts.pop()
1118 1118 return False
1119 1119
1120 1120 def peer(self):
1121 1121 return localpeer(self) # not cached to avoid reference cycle
1122 1122
1123 1123 def unfiltered(self):
1124 1124 """Return unfiltered version of the repository
1125 1125
1126 1126 Intended to be overridden by filtered repo."""
1127 1127 return self
1128 1128
1129 1129 def filtered(self, name, visibilityexceptions=None):
1130 1130 """Return a filtered version of a repository"""
1131 1131 cls = repoview.newtype(self.unfiltered().__class__)
1132 1132 return cls(self, name, visibilityexceptions)
1133 1133
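# Usage sketch: 'visible' and 'served' are standard repoview filter
# names used elsewhere in this class:
#
#   visible = repo.filtered('visible')  # hides hidden changesets
#   served = repo.filtered('served')    # what is served to clients
#
# The returned views share storage with ``repo``; only the set of
# filtered revisions differs.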
1134 1134 @repofilecache('bookmarks', 'bookmarks.current')
1135 1135 def _bookmarks(self):
1136 1136 return bookmarks.bmstore(self)
1137 1137
1138 1138 @property
1139 1139 def _activebookmark(self):
1140 1140 return self._bookmarks.active
1141 1141
1142 1142 # _phasesets depend on changelog. what we need is to call
1143 1143 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1144 1144 # can't be easily expressed in filecache mechanism.
1145 1145 @storecache('phaseroots', '00changelog.i')
1146 1146 def _phasecache(self):
1147 1147 return phases.phasecache(self, self._phasedefaults)
1148 1148
1149 1149 @storecache('obsstore')
1150 1150 def obsstore(self):
1151 1151 return obsolete.makestore(self.ui, self)
1152 1152
1153 1153 @storecache('00changelog.i')
1154 1154 def changelog(self):
1155 1155 return changelog.changelog(self.svfs,
1156 1156 trypending=txnutil.mayhavepending(self.root))
1157 1157
1158 1158 @storecache('00manifest.i')
1159 1159 def manifestlog(self):
1160 1160 rootstore = manifest.manifestrevlog(self.svfs)
1161 1161 return manifest.manifestlog(self.svfs, self, rootstore)
1162 1162
1163 1163 @repofilecache('dirstate')
1164 1164 def dirstate(self):
1165 1165 return self._makedirstate()
1166 1166
1167 1167 def _makedirstate(self):
1168 1168 """Extension point for wrapping the dirstate per-repo."""
1169 1169 sparsematchfn = lambda: sparse.matcher(self)
1170 1170
1171 1171 return dirstate.dirstate(self.vfs, self.ui, self.root,
1172 1172 self._dirstatevalidate, sparsematchfn)
1173 1173
1174 1174 def _dirstatevalidate(self, node):
1175 1175 try:
1176 1176 self.changelog.rev(node)
1177 1177 return node
1178 1178 except error.LookupError:
1179 1179 if not self._dirstatevalidatewarned:
1180 1180 self._dirstatevalidatewarned = True
1181 1181 self.ui.warn(_("warning: ignoring unknown"
1182 1182 " working parent %s!\n") % short(node))
1183 1183 return nullid
1184 1184
1185 1185 @storecache(narrowspec.FILENAME)
1186 1186 def narrowpats(self):
1187 1187 """matcher patterns for this repository's narrowspec
1188 1188
1189 1189 A tuple of (includes, excludes).
1190 1190 """
1191 1191 return narrowspec.load(self)
1192 1192
1193 1193 @storecache(narrowspec.FILENAME)
1194 1194 def _narrowmatch(self):
1195 1195 if repository.NARROW_REQUIREMENT not in self.requirements:
1196 1196 return matchmod.always(self.root, '')
1197 1197 include, exclude = self.narrowpats
1198 1198 return narrowspec.match(self.root, include=include, exclude=exclude)
1199 1199
1200 1200 # TODO(martinvonz): make this property-like instead?
1201 1201 def narrowmatch(self):
1202 1202 return self._narrowmatch
1203 1203
1204 1204 def setnarrowpats(self, newincludes, newexcludes):
1205 1205 narrowspec.save(self, newincludes, newexcludes)
1206 1206 self.invalidate(clearfilecache=True)
1207 1207
1208 1208 def __getitem__(self, changeid):
1209 1209 if changeid is None:
1210 1210 return context.workingctx(self)
1211 1211 if isinstance(changeid, context.basectx):
1212 1212 return changeid
1213 1213 if isinstance(changeid, slice):
1214 1214 # wdirrev isn't contiguous so the slice shouldn't include it
1215 1215 return [self[i]
1216 1216 for i in pycompat.xrange(*changeid.indices(len(self)))
1217 1217 if i not in self.changelog.filteredrevs]
1218 1218 try:
1219 1219 if isinstance(changeid, int):
1220 1220 node = self.changelog.node(changeid)
1221 1221 rev = changeid
1222 1222 return context.changectx(self, rev, node)
1223 1223 elif changeid == 'null':
1224 1224 node = nullid
1225 1225 rev = nullrev
1226 1226 return context.changectx(self, rev, node)
1227 1227 elif changeid == 'tip':
1228 1228 node = self.changelog.tip()
1229 1229 rev = self.changelog.rev(node)
1230 1230 return context.changectx(self, rev, node)
1231 elif changeid == '.':
1233 1232 # this is a hack to delay/avoid loading obsmarkers
1234 1233 # when we know that '.' won't be hidden
1235 1234 node = self.dirstate.p1()
1236 1235 rev = self.unfiltered().changelog.rev(node)
1237 1236 return context.changectx(self, rev, node)
1238 1237 elif len(changeid) == 20:
1239 1238 try:
1240 1239 node = changeid
1241 1240 rev = self.changelog.rev(changeid)
1242 1241 return context.changectx(self, rev, node)
1243 1242 except error.FilteredLookupError:
1244 1243 changeid = hex(changeid) # for the error message
1245 1244 raise
1246 1245 except LookupError:
1247 1246 # check if it might have come from damaged dirstate
1248 1247 #
1249 1248 # XXX we could avoid the unfiltered if we had a recognizable
1250 1249 # exception for filtered changeset access
1251 1250 if (self.local()
1252 1251 and changeid in self.unfiltered().dirstate.parents()):
1253 1252 msg = _("working directory has unknown parent '%s'!")
1254 1253 raise error.Abort(msg % short(changeid))
1255 1254 changeid = hex(changeid) # for the error message
1256 1255
1257 1256 elif len(changeid) == 40:
1258 1257 try:
1259 1258 node = bin(changeid)
1260 1259 rev = self.changelog.rev(node)
1261 1260 return context.changectx(self, rev, node)
1262 1261 except error.FilteredLookupError:
1263 1262 raise
1264 1263 except LookupError:
1265 1264 pass
1266 1265 else:
1267 1266 raise error.ProgrammingError(
1268 1267 "unsupported changeid '%s' of type %s" %
1269 1268 (changeid, type(changeid)))
1270 1269
1271 1270 except (error.FilteredIndexError, error.FilteredLookupError):
1272 1271 raise error.FilteredRepoLookupError(_("filtered revision '%s'")
1273 1272 % pycompat.bytestr(changeid))
1274 1273 except IndexError:
1275 1274 pass
1276 1275 except error.WdirUnsupported:
1277 1276 return context.workingctx(self)
1278 1277 raise error.RepoLookupError(
1279 1278 _("unknown revision '%s'") % changeid)
1280 1279
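# Lookup sketch covering the branches above:
#
#   repo[None]     # workingctx for the working directory
#   repo['tip']    # changectx for the repository tip
#   repo['.']      # changectx for the working directory parent
#   repo[0]        # changectx for revision number 0
#   repo['0' * 40] # changectx for a full hex node
#
# 20-byte strings are treated as binary nodes; other string-like
# values raise a ProgrammingError rather than being resolved here.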
1281 1280 def __contains__(self, changeid):
1282 1281 """True if the given changeid exists
1283 1282
1284 1283 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1285 1284 specified.
1286 1285 """
1287 1286 try:
1288 1287 self[changeid]
1289 1288 return True
1290 1289 except error.RepoLookupError:
1291 1290 return False
1292 1291
1293 1292 def __nonzero__(self):
1294 1293 return True
1295 1294
1296 1295 __bool__ = __nonzero__
1297 1296
1298 1297 def __len__(self):
1299 1298 # no need to pay the cost of repoview.changelog
1300 1299 unfi = self.unfiltered()
1301 1300 return len(unfi.changelog)
1302 1301
1303 1302 def __iter__(self):
1304 1303 return iter(self.changelog)
1305 1304
1306 1305 def revs(self, expr, *args):
1307 1306 '''Find revisions matching a revset.
1308 1307
1309 1308 The revset is specified as a string ``expr`` that may contain
1310 1309 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1311 1310
1312 1311 Revset aliases from the configuration are not expanded. To expand
1313 1312 user aliases, consider calling ``scmutil.revrange()`` or
1314 1313 ``repo.anyrevs([expr], user=True)``.
1315 1314
1316 1315 Returns a revset.abstractsmartset, which is a list-like interface
1317 1316 that contains integer revisions.
1318 1317 '''
1319 1318 expr = revsetlang.formatspec(expr, *args)
1320 1319 m = revset.match(None, expr)
1321 1320 return m(self)
1322 1321
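# A couple of illustrative calls (the %-escapes are documented in
# ``revsetlang.formatspec``):
#
#   repo.revs('heads(%ld)', [1, 2, 3])  # %ld formats an int list
#   repo.revs('branch(%s)', 'default')  # %s formats a string
#
# Both return an abstractsmartset of integer revisions.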
1323 1322 def set(self, expr, *args):
1324 1323 '''Find revisions matching a revset and emit changectx instances.
1325 1324
1326 1325 This is a convenience wrapper around ``revs()`` that iterates the
1327 1326 result and is a generator of changectx instances.
1328 1327
1329 1328 Revset aliases from the configuration are not expanded. To expand
1330 1329 user aliases, consider calling ``scmutil.revrange()``.
1331 1330 '''
1332 1331 for r in self.revs(expr, *args):
1333 1332 yield self[r]
1334 1333
1335 1334 def anyrevs(self, specs, user=False, localalias=None):
1336 1335 '''Find revisions matching one of the given revsets.
1337 1336
1338 1337 Revset aliases from the configuration are not expanded by default. To
1339 1338 expand user aliases, specify ``user=True``. To provide some local
1340 1339 definitions overriding user aliases, set ``localalias`` to
1341 1340 ``{name: definitionstring}``.
1342 1341 '''
1343 1342 if user:
1344 1343 m = revset.matchany(self.ui, specs,
1345 1344 lookup=revset.lookupfn(self),
1346 1345 localalias=localalias)
1347 1346 else:
1348 1347 m = revset.matchany(None, specs, localalias=localalias)
1349 1348 return m(self)
1350 1349
1351 1350 def url(self):
1352 1351 return 'file:' + self.root
1353 1352
1354 1353 def hook(self, name, throw=False, **args):
1355 1354 """Call a hook, passing this repo instance.
1356 1355
1357 1356 This is a convenience method to aid invoking hooks. Extensions likely
1358 1357 won't call this unless they have registered a custom hook or are
1359 1358 replacing code that is expected to call a hook.
1360 1359 """
1361 1360 return hook.hook(self.ui, self, name, throw, **args)
1362 1361
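# Hedged usage sketch (the hook name 'myhook' is hypothetical):
#
#   repo.hook('myhook', throw=False, node=hex(node))
#
# Keyword arguments are exposed to external hook scripts as HG_*
# environment variables (here, HG_NODE).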
1363 1362 @filteredpropertycache
1364 1363 def _tagscache(self):
1365 1364 '''Returns a tagscache object that contains various tags-related
1366 1365 caches.'''
1367 1366
1368 1367 # This simplifies its cache management by having one decorated
1369 1368 # function (this one) and the rest simply fetch things from it.
1370 1369 class tagscache(object):
1371 1370 def __init__(self):
1372 1371 # These two define the set of tags for this repository. tags
1373 1372 # maps tag name to node; tagtypes maps tag name to 'global' or
1374 1373 # 'local'. (Global tags are defined by .hgtags across all
1375 1374 # heads, and local tags are defined in .hg/localtags.)
1376 1375 # They constitute the in-memory cache of tags.
1377 1376 self.tags = self.tagtypes = None
1378 1377
1379 1378 self.nodetagscache = self.tagslist = None
1380 1379
1381 1380 cache = tagscache()
1382 1381 cache.tags, cache.tagtypes = self._findtags()
1383 1382
1384 1383 return cache
1385 1384
1386 1385 def tags(self):
1387 1386 '''return a mapping of tag to node'''
1388 1387 t = {}
1389 1388 if self.changelog.filteredrevs:
1390 1389 tags, tt = self._findtags()
1391 1390 else:
1392 1391 tags = self._tagscache.tags
1393 1392 for k, v in tags.iteritems():
1394 1393 try:
1395 1394 # ignore tags to unknown nodes
1396 1395 self.changelog.rev(v)
1397 1396 t[k] = v
1398 1397 except (error.LookupError, ValueError):
1399 1398 pass
1400 1399 return t
1401 1400
1402 1401 def _findtags(self):
1403 1402 '''Do the hard work of finding tags. Return a pair of dicts
1404 1403 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1405 1404 maps tag name to a string like \'global\' or \'local\'.
1406 1405 Subclasses or extensions are free to add their own tags, but
1407 1406 should be aware that the returned dicts will be retained for the
1408 1407 duration of the localrepo object.'''
1409 1408
1410 1409 # XXX what tagtype should subclasses/extensions use? Currently
1411 1410 # mq and bookmarks add tags, but do not set the tagtype at all.
1412 1411 # Should each extension invent its own tag type? Should there
1413 1412 # be one tagtype for all such "virtual" tags? Or is the status
1414 1413 # quo fine?
1415 1414
1416 1415
1417 1416 # map tag name to (node, hist)
1418 1417 alltags = tagsmod.findglobaltags(self.ui, self)
1419 1418 # map tag name to tag type
1420 1419 tagtypes = dict((tag, 'global') for tag in alltags)
1421 1420
1422 1421 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1423 1422
1424 1423 # Build the return dicts. Have to re-encode tag names because
1425 1424 # the tags module always uses UTF-8 (in order not to lose info
1426 1425 # writing to the cache), but the rest of Mercurial wants them in
1427 1426 # local encoding.
1428 1427 tags = {}
1429 1428 for (name, (node, hist)) in alltags.iteritems():
1430 1429 if node != nullid:
1431 1430 tags[encoding.tolocal(name)] = node
1432 1431 tags['tip'] = self.changelog.tip()
1433 1432 tagtypes = dict([(encoding.tolocal(name), value)
1434 1433 for (name, value) in tagtypes.iteritems()])
1435 1434 return (tags, tagtypes)
1436 1435
1437 1436 def tagtype(self, tagname):
1438 1437 '''
1439 1438 return the type of the given tag. result can be:
1440 1439
1441 1440 'local' : a local tag
1442 1441 'global' : a global tag
1443 1442 None : tag does not exist
1444 1443 '''
1445 1444
1446 1445 return self._tagscache.tagtypes.get(tagname)
1447 1446
1448 1447 def tagslist(self):
1449 1448 '''return a list of tags ordered by revision'''
1450 1449 if not self._tagscache.tagslist:
1451 1450 l = []
1452 1451 for t, n in self.tags().iteritems():
1453 1452 l.append((self.changelog.rev(n), t, n))
1454 1453 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1455 1454
1456 1455 return self._tagscache.tagslist
1457 1456
1458 1457 def nodetags(self, node):
1459 1458 '''return the tags associated with a node'''
1460 1459 if not self._tagscache.nodetagscache:
1461 1460 nodetagscache = {}
1462 1461 for t, n in self._tagscache.tags.iteritems():
1463 1462 nodetagscache.setdefault(n, []).append(t)
1464 1463 for tags in nodetagscache.itervalues():
1465 1464 tags.sort()
1466 1465 self._tagscache.nodetagscache = nodetagscache
1467 1466 return self._tagscache.nodetagscache.get(node, [])
1468 1467
1469 1468 def nodebookmarks(self, node):
1470 1469 """return the list of bookmarks pointing to the specified node"""
1471 1470 return self._bookmarks.names(node)
1472 1471
1473 1472 def branchmap(self):
1474 1473 '''returns a dictionary {branch: [branchheads]} with branchheads
1475 1474 ordered by increasing revision number'''
1476 1475 branchmap.updatecache(self)
1477 1476 return self._branchcaches[self.filtername]
1478 1477
1479 1478 @unfilteredmethod
1480 1479 def revbranchcache(self):
1481 1480 if not self._revbranchcache:
1482 1481 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1483 1482 return self._revbranchcache
1484 1483
1485 1484 def branchtip(self, branch, ignoremissing=False):
1486 1485 '''return the tip node for a given branch
1487 1486
1488 1487 If ignoremissing is True, then this method will not raise an error.
1489 1488 This is helpful for callers that only expect None for a missing branch
1490 1489 (e.g. namespace).
1491 1490
1492 1491 '''
1493 1492 try:
1494 1493 return self.branchmap().branchtip(branch)
1495 1494 except KeyError:
1496 1495 if not ignoremissing:
1497 1496 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1498 1497 else:
1499 1498 pass
1500 1499
1501 1500 def lookup(self, key):
1502 1501 return scmutil.revsymbol(self, key).node()
1503 1502
1504 1503 def lookupbranch(self, key):
1505 1504 if key in self.branchmap():
1506 1505 return key
1507 1506
1508 1507 return scmutil.revsymbol(self, key).branch()
1509 1508
1510 1509 def known(self, nodes):
1511 1510 cl = self.changelog
1512 1511 nm = cl.nodemap
1513 1512 filtered = cl.filteredrevs
1514 1513 result = []
1515 1514 for n in nodes:
1516 1515 r = nm.get(n)
1517 1516 resp = not (r is None or r in filtered)
1518 1517 result.append(resp)
1519 1518 return result
1520 1519
1521 1520 def local(self):
1522 1521 return self
1523 1522
1524 1523 def publishing(self):
1525 1524 # it's safe (and desirable) to trust the publish flag unconditionally
1526 1525 # so that we don't finalize changes shared between users via ssh or nfs
1527 1526 return self.ui.configbool('phases', 'publish', untrusted=True)
1528 1527
1529 1528 def cancopy(self):
1530 1529 # so statichttprepo's override of local() works
1531 1530 if not self.local():
1532 1531 return False
1533 1532 if not self.publishing():
1534 1533 return True
1535 1534 # if publishing we can't copy if there is filtered content
1536 1535 return not self.filtered('visible').changelog.filteredrevs
1537 1536
1538 1537 def shared(self):
1539 1538 '''the type of shared repository (None if not shared)'''
1540 1539 if self.sharedpath != self.path:
1541 1540 return 'store'
1542 1541 return None
1543 1542
1544 1543 def wjoin(self, f, *insidef):
1545 1544 return self.vfs.reljoin(self.root, f, *insidef)
1546 1545
1547 1546 def setparents(self, p1, p2=nullid):
1548 1547 with self.dirstate.parentchange():
1549 1548 copies = self.dirstate.setparents(p1, p2)
1550 1549 pctx = self[p1]
1551 1550 if copies:
1552 1551 # Adjust copy records, the dirstate cannot do it, it
1553 1552 # requires access to parents manifests. Preserve them
1554 1553 # only for entries added to first parent.
1555 1554 for f in copies:
1556 1555 if f not in pctx and copies[f] in pctx:
1557 1556 self.dirstate.copy(copies[f], f)
1558 1557 if p2 == nullid:
1559 1558 for f, s in sorted(self.dirstate.copies().items()):
1560 1559 if f not in pctx and s not in pctx:
1561 1560 self.dirstate.copy(None, f)
1562 1561
1563 1562 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1564 1563 """changeid can be a changeset revision, node, or tag.
1565 1564 fileid can be a file revision or node."""
1566 1565 return context.filectx(self, path, changeid, fileid,
1567 1566 changectx=changectx)
1568 1567
1569 1568 def getcwd(self):
1570 1569 return self.dirstate.getcwd()
1571 1570
1572 1571 def pathto(self, f, cwd=None):
1573 1572 return self.dirstate.pathto(f, cwd)
1574 1573
1575 1574 def _loadfilter(self, filter):
1576 1575 if filter not in self._filterpats:
1577 1576 l = []
1578 1577 for pat, cmd in self.ui.configitems(filter):
1579 1578 if cmd == '!':
1580 1579 continue
1581 1580 mf = matchmod.match(self.root, '', [pat])
1582 1581 fn = None
1583 1582 params = cmd
1584 1583 for name, filterfn in self._datafilters.iteritems():
1585 1584 if cmd.startswith(name):
1586 1585 fn = filterfn
1587 1586 params = cmd[len(name):].lstrip()
1588 1587 break
1589 1588 if not fn:
1590 1589 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1591 1590 # Wrap old filters not supporting keyword arguments
1592 1591 if not pycompat.getargspec(fn)[2]:
1593 1592 oldfn = fn
1594 1593 fn = lambda s, c, **kwargs: oldfn(s, c)
1595 1594 l.append((mf, fn, params))
1596 1595 self._filterpats[filter] = l
1597 1596 return self._filterpats[filter]
1598 1597
1599 1598 def _filter(self, filterpats, filename, data):
1600 1599 for mf, fn, cmd in filterpats:
1601 1600 if mf(filename):
1602 1601 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1603 1602 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1604 1603 break
1605 1604
1606 1605 return data
1607 1606
1608 1607 @unfilteredpropertycache
1609 1608 def _encodefilterpats(self):
1610 1609 return self._loadfilter('encode')
1611 1610
1612 1611 @unfilteredpropertycache
1613 1612 def _decodefilterpats(self):
1614 1613 return self._loadfilter('decode')
1615 1614
1616 1615 def adddatafilter(self, name, filter):
1617 1616 self._datafilters[name] = filter
1618 1617
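# For reference, the filters loaded by _loadfilter come from hgrc
# sections of the same name; a hypothetical configuration:
#
#   [encode]
#   *.txt = tempfile: unix2dos INFILE OUTFILE
#   [decode]
#   *.txt = tempfile: dos2unix INFILE OUTFILE
#
# ``adddatafilter`` registers named filter drivers, which
# _loadfilter matches against the configured command by prefix.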
1619 1618 def wread(self, filename):
1620 1619 if self.wvfs.islink(filename):
1621 1620 data = self.wvfs.readlink(filename)
1622 1621 else:
1623 1622 data = self.wvfs.read(filename)
1624 1623 return self._filter(self._encodefilterpats, filename, data)
1625 1624
1626 1625 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1627 1626 """write ``data`` into ``filename`` in the working directory
1628 1627
1629 1628 This returns length of written (maybe decoded) data.
1630 1629 """
1631 1630 data = self._filter(self._decodefilterpats, filename, data)
1632 1631 if 'l' in flags:
1633 1632 self.wvfs.symlink(data, filename)
1634 1633 else:
1635 1634 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1636 1635 **kwargs)
1637 1636 if 'x' in flags:
1638 1637 self.wvfs.setflags(filename, False, True)
1639 1638 else:
1640 1639 self.wvfs.setflags(filename, False, False)
1641 1640 return len(data)
1642 1641
1643 1642 def wwritedata(self, filename, data):
1644 1643 return self._filter(self._decodefilterpats, filename, data)
1645 1644
1646 1645 def currenttransaction(self):
1647 1646 """return the current transaction or None if non exists"""
1648 1647 if self._transref:
1649 1648 tr = self._transref()
1650 1649 else:
1651 1650 tr = None
1652 1651
1653 1652 if tr and tr.running():
1654 1653 return tr
1655 1654 return None
1656 1655
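# Sketch of the expected calling convention for transaction() below
# (the transaction name is illustrative); both the lock and the
# transaction act as context managers:
#
#   with repo.lock():
#       with repo.transaction('my-operation') as tr:
#           ...  # store mutations, rolled back on an exception
#
# The devel 'check-locks' logic below insists a lock is held first.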
1657 1656 def transaction(self, desc, report=None):
1658 1657 if (self.ui.configbool('devel', 'all-warnings')
1659 1658 or self.ui.configbool('devel', 'check-locks')):
1660 1659 if self._currentlock(self._lockref) is None:
1661 1660 raise error.ProgrammingError('transaction requires locking')
1662 1661 tr = self.currenttransaction()
1663 1662 if tr is not None:
1664 1663 return tr.nest(name=desc)
1665 1664
1666 1665 # abort here if the journal already exists
1667 1666 if self.svfs.exists("journal"):
1668 1667 raise error.RepoError(
1669 1668 _("abandoned transaction found"),
1670 1669 hint=_("run 'hg recover' to clean up transaction"))
1671 1670
1672 1671 idbase = "%.40f#%f" % (random.random(), time.time())
1673 1672 ha = hex(hashlib.sha1(idbase).digest())
1674 1673 txnid = 'TXN:' + ha
1675 1674 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1676 1675
1677 1676 self._writejournal(desc)
1678 1677 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1679 1678 if report:
1680 1679 rp = report
1681 1680 else:
1682 1681 rp = self.ui.warn
1683 1682 vfsmap = {'plain': self.vfs} # root of .hg/
1684 1683 # we must avoid a cyclic reference between repo and transaction.
1685 1684 reporef = weakref.ref(self)
1686 1685 # Code to track tag movement
1687 1686 #
1688 1687 # Since tags are all handled as file content, it is actually quite hard
1689 1688 # to track these movements from a code perspective. So we fall back to
1690 1689 # tracking at the repository level. One could envision tracking changes
1691 1690 # to the '.hgtags' file through changegroup application, but that fails
1692 1691 # to cope with cases where a transaction exposes new heads without a
1693 1692 # changegroup being involved (eg: phase movement).
1694 1693 #
1695 1694 # For now, we gate the feature behind a flag since this likely comes
1696 1695 # with performance impacts. The current code runs more often than needed
1697 1696 # and does not use caches as much as it could. The current focus is on
1698 1697 # the behavior of the feature so we disable it by default. The flag
1699 1698 # will be removed when we are happy with the performance impact.
1700 1699 #
1701 1700 # Once this feature is no longer experimental move the following
1702 1701 # documentation to the appropriate help section:
1703 1702 #
1704 1703 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1705 1704 # tags (new or changed or deleted tags). In addition the details of
1706 1705 # these changes are made available in a file at:
1707 1706 # ``REPOROOT/.hg/changes/tags.changes``.
1708 1707 # Make sure you check for HG_TAG_MOVED before reading that file as it
1709 1708 # might exist from a previous transaction even if no tags were touched
1710 1709 # in this one. Changes are recorded in a line-based format::
1711 1710 #
1712 1711 # <action> <hex-node> <tag-name>\n
1713 1712 #
1714 1713 # Actions are defined as follows:
1715 1714 # "-R": tag is removed,
1716 1715 # "+A": tag is added,
1717 1716 # "-M": tag is moved (old value),
1718 1717 # "+M": tag is moved (new value),
1719 1718 tracktags = lambda x: None
1720 1719 # experimental config: experimental.hook-track-tags
1721 1720 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1722 1721 if desc != 'strip' and shouldtracktags:
1723 1722 oldheads = self.changelog.headrevs()
1724 1723 def tracktags(tr2):
1725 1724 repo = reporef()
1726 1725 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1727 1726 newheads = repo.changelog.headrevs()
1728 1727 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1729 1728 # note: we compare lists here;
1730 1729 # as we do it only once, building a set would not be cheaper
1731 1730 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1732 1731 if changes:
1733 1732 tr2.hookargs['tag_moved'] = '1'
1734 1733 with repo.vfs('changes/tags.changes', 'w',
1735 1734 atomictemp=True) as changesfile:
1736 1735 # note: we do not register the file to the transaction
1737 1736 # because we need it to still exist when the transaction
1738 1737 # is closed (for txnclose hooks)
1739 1738 tagsmod.writediff(changesfile, changes)
1740 1739 def validate(tr2):
1741 1740 """will run pre-closing hooks"""
1742 1741 # XXX the transaction API is a bit lacking here so we take a hacky
1743 1742 # path for now
1744 1743 #
1745 1744 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
1746 1745 # dict is copied before these run. In addition we needs the data
1747 1746 # available to in memory hooks too.
1748 1747 #
1749 1748 # Moreover, we also need to make sure this runs before txnclose
1750 1749 # hooks and there is no "pending" mechanism that would execute
1751 1750 # logic only if hooks are about to run.
1752 1751 #
1753 1752 # Fixing this limitation of the transaction is also needed to track
1754 1753 # other families of changes (bookmarks, phases, obsolescence).
1755 1754 #
1756 1755 # This will have to be fixed before we remove the experimental
1757 1756 # gating.
1758 1757 tracktags(tr2)
1759 1758 repo = reporef()
1760 1759 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1761 1760 scmutil.enforcesinglehead(repo, tr2, desc)
1762 1761 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1763 1762 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1764 1763 args = tr.hookargs.copy()
1765 1764 args.update(bookmarks.preparehookargs(name, old, new))
1766 1765 repo.hook('pretxnclose-bookmark', throw=True,
1767 1766 txnname=desc,
1768 1767 **pycompat.strkwargs(args))
1769 1768 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1770 1769 cl = repo.unfiltered().changelog
1771 1770 for rev, (old, new) in tr.changes['phases'].items():
1772 1771 args = tr.hookargs.copy()
1773 1772 node = hex(cl.node(rev))
1774 1773 args.update(phases.preparehookargs(node, old, new))
1775 1774 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1776 1775 **pycompat.strkwargs(args))
1777 1776
1778 1777 repo.hook('pretxnclose', throw=True,
1779 1778 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1780 1779 def releasefn(tr, success):
1781 1780 repo = reporef()
1782 1781 if success:
1783 1782 # this should be explicitly invoked here, because
1784 1783 # in-memory changes aren't written out at closing
1785 1784 # transaction, if tr.addfilegenerator (via
1786 1785 # dirstate.write or so) isn't invoked while
1787 1786 # transaction running
1788 1787 repo.dirstate.write(None)
1789 1788 else:
1790 1789 # discard all changes (including ones already written
1791 1790 # out) in this transaction
1792 1791 narrowspec.restorebackup(self, 'journal.narrowspec')
1793 1792 repo.dirstate.restorebackup(None, 'journal.dirstate')
1794 1793
1795 1794 repo.invalidate(clearfilecache=True)
1796 1795
1797 1796 tr = transaction.transaction(rp, self.svfs, vfsmap,
1798 1797 "journal",
1799 1798 "undo",
1800 1799 aftertrans(renames),
1801 1800 self.store.createmode,
1802 1801 validator=validate,
1803 1802 releasefn=releasefn,
1804 1803 checkambigfiles=_cachedfiles,
1805 1804 name=desc)
1806 1805 tr.changes['origrepolen'] = len(self)
1807 1806 tr.changes['obsmarkers'] = set()
1808 1807 tr.changes['phases'] = {}
1809 1808 tr.changes['bookmarks'] = {}
1810 1809
1811 1810 tr.hookargs['txnid'] = txnid
1812 1811 # note: writing the fncache only during finalize means that the file is
1813 1812 # outdated when running hooks. As fncache is used for streaming clone,
1814 1813 # this is not expected to break anything that happens during the hooks.
1815 1814 tr.addfinalize('flush-fncache', self.store.write)
1816 1815 def txnclosehook(tr2):
1817 1816 """To be run if transaction is successful, will schedule a hook run
1818 1817 """
1819 1818 # Don't reference tr2 in hook() so we don't hold a reference.
1820 1819 # This reduces memory consumption when there are multiple
1821 1820 # transactions per lock. This can likely go away if issue5045
1822 1821 # fixes the function accumulation.
1823 1822 hookargs = tr2.hookargs
1824 1823
1825 1824 def hookfunc():
1826 1825 repo = reporef()
1827 1826 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1828 1827 bmchanges = sorted(tr.changes['bookmarks'].items())
1829 1828 for name, (old, new) in bmchanges:
1830 1829 args = tr.hookargs.copy()
1831 1830 args.update(bookmarks.preparehookargs(name, old, new))
1832 1831 repo.hook('txnclose-bookmark', throw=False,
1833 1832 txnname=desc, **pycompat.strkwargs(args))
1834 1833
1835 1834 if hook.hashook(repo.ui, 'txnclose-phase'):
1836 1835 cl = repo.unfiltered().changelog
1837 1836 phasemv = sorted(tr.changes['phases'].items())
1838 1837 for rev, (old, new) in phasemv:
1839 1838 args = tr.hookargs.copy()
1840 1839 node = hex(cl.node(rev))
1841 1840 args.update(phases.preparehookargs(node, old, new))
1842 1841 repo.hook('txnclose-phase', throw=False, txnname=desc,
1843 1842 **pycompat.strkwargs(args))
1844 1843
1845 1844 repo.hook('txnclose', throw=False, txnname=desc,
1846 1845 **pycompat.strkwargs(hookargs))
1847 1846 reporef()._afterlock(hookfunc)
1848 1847 tr.addfinalize('txnclose-hook', txnclosehook)
1849 1848 # Include a leading "-" to make it happen before the transaction summary
1850 1849 # reports registered via scmutil.registersummarycallback() whose names
1851 1850 # are 00-txnreport etc. That way, the caches will be warm when the
1852 1851 # callbacks run.
1853 1852 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1854 1853 def txnaborthook(tr2):
1855 1854 """To be run if transaction is aborted
1856 1855 """
1857 1856 reporef().hook('txnabort', throw=False, txnname=desc,
1858 1857 **pycompat.strkwargs(tr2.hookargs))
1859 1858 tr.addabort('txnabort-hook', txnaborthook)
1860 1859 # avoid eager cache invalidation. in-memory data should be identical
1861 1860 # to stored data if transaction has no error.
1862 1861 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1863 1862 self._transref = weakref.ref(tr)
1864 1863 scmutil.registersummarycallback(self, tr, desc)
1865 1864 return tr
1866 1865
1867 1866 def _journalfiles(self):
1868 1867 return ((self.svfs, 'journal'),
1869 1868 (self.vfs, 'journal.dirstate'),
1870 1869 (self.vfs, 'journal.branch'),
1871 1870 (self.vfs, 'journal.desc'),
1872 1871 (self.vfs, 'journal.bookmarks'),
1873 1872 (self.svfs, 'journal.phaseroots'))
1874 1873
1875 1874 def undofiles(self):
1876 1875 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1877 1876
1878 1877 @unfilteredmethod
1879 1878 def _writejournal(self, desc):
1880 1879 self.dirstate.savebackup(None, 'journal.dirstate')
1881 1880 narrowspec.savebackup(self, 'journal.narrowspec')
1882 1881 self.vfs.write("journal.branch",
1883 1882 encoding.fromlocal(self.dirstate.branch()))
1884 1883 self.vfs.write("journal.desc",
1885 1884 "%d\n%s\n" % (len(self), desc))
1886 1885 self.vfs.write("journal.bookmarks",
1887 1886 self.vfs.tryread("bookmarks"))
1888 1887 self.svfs.write("journal.phaseroots",
1889 1888 self.svfs.tryread("phaseroots"))
1890 1889
1891 1890 def recover(self):
1892 1891 with self.lock():
1893 1892 if self.svfs.exists("journal"):
1894 1893 self.ui.status(_("rolling back interrupted transaction\n"))
1895 1894 vfsmap = {'': self.svfs,
1896 1895 'plain': self.vfs,}
1897 1896 transaction.rollback(self.svfs, vfsmap, "journal",
1898 1897 self.ui.warn,
1899 1898 checkambigfiles=_cachedfiles)
1900 1899 self.invalidate()
1901 1900 return True
1902 1901 else:
1903 1902 self.ui.warn(_("no interrupted transaction available\n"))
1904 1903 return False
1905 1904
1906 1905 def rollback(self, dryrun=False, force=False):
1907 1906 wlock = lock = dsguard = None
1908 1907 try:
1909 1908 wlock = self.wlock()
1910 1909 lock = self.lock()
1911 1910 if self.svfs.exists("undo"):
1912 1911 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1913 1912
1914 1913 return self._rollback(dryrun, force, dsguard)
1915 1914 else:
1916 1915 self.ui.warn(_("no rollback information available\n"))
1917 1916 return 1
1918 1917 finally:
1919 1918 release(dsguard, lock, wlock)
1920 1919
1921 1920 @unfilteredmethod # Until we get smarter cache management
1922 1921 def _rollback(self, dryrun, force, dsguard):
1923 1922 ui = self.ui
1924 1923 try:
1925 1924 args = self.vfs.read('undo.desc').splitlines()
1926 1925 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1927 1926 if len(args) >= 3:
1928 1927 detail = args[2]
1929 1928 oldtip = oldlen - 1
1930 1929
1931 1930 if detail and ui.verbose:
1932 1931 msg = (_('repository tip rolled back to revision %d'
1933 1932 ' (undo %s: %s)\n')
1934 1933 % (oldtip, desc, detail))
1935 1934 else:
1936 1935 msg = (_('repository tip rolled back to revision %d'
1937 1936 ' (undo %s)\n')
1938 1937 % (oldtip, desc))
1939 1938 except IOError:
1940 1939 msg = _('rolling back unknown transaction\n')
1941 1940 desc = None
1942 1941
1943 1942 if not force and self['.'] != self['tip'] and desc == 'commit':
1944 1943 raise error.Abort(
1945 1944 _('rollback of last commit while not checked out '
1946 1945 'may lose data'), hint=_('use -f to force'))
1947 1946
1948 1947 ui.status(msg)
1949 1948 if dryrun:
1950 1949 return 0
1951 1950
1952 1951 parents = self.dirstate.parents()
1953 1952 self.destroying()
1954 1953 vfsmap = {'plain': self.vfs, '': self.svfs}
1955 1954 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1956 1955 checkambigfiles=_cachedfiles)
1957 1956 if self.vfs.exists('undo.bookmarks'):
1958 1957 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1959 1958 if self.svfs.exists('undo.phaseroots'):
1960 1959 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1961 1960 self.invalidate()
1962 1961
1963 1962 parentgone = (parents[0] not in self.changelog.nodemap or
1964 1963 parents[1] not in self.changelog.nodemap)
1965 1964 if parentgone:
1966 1965 # prevent dirstateguard from overwriting already restored one
1967 1966 dsguard.close()
1968 1967
1969 1968 narrowspec.restorebackup(self, 'undo.narrowspec')
1970 1969 self.dirstate.restorebackup(None, 'undo.dirstate')
1971 1970 try:
1972 1971 branch = self.vfs.read('undo.branch')
1973 1972 self.dirstate.setbranch(encoding.tolocal(branch))
1974 1973 except IOError:
1975 1974 ui.warn(_('named branch could not be reset: '
1976 1975 'current branch is still \'%s\'\n')
1977 1976 % self.dirstate.branch())
1978 1977
1979 1978 parents = tuple([p.rev() for p in self[None].parents()])
1980 1979 if len(parents) > 1:
1981 1980 ui.status(_('working directory now based on '
1982 1981 'revisions %d and %d\n') % parents)
1983 1982 else:
1984 1983 ui.status(_('working directory now based on '
1985 1984 'revision %d\n') % parents)
1986 1985 mergemod.mergestate.clean(self, self['.'].node())
1987 1986
1988 1987 # TODO: if we know which new heads may result from this rollback, pass
1989 1988 # them to destroy(), which will prevent the branchhead cache from being
1990 1989 # invalidated.
1991 1990 self.destroyed()
1992 1991 return 0
1993 1992
1994 1993 def _buildcacheupdater(self, newtransaction):
1995 1994 """called during transaction to build the callback updating cache
1996 1995
1997 1996 Lives on the repository to help extensions that might want to augment
1998 1997 this logic. For this purpose, the created transaction is passed to the
1999 1998 method.
2000 1999 """
2002 2001 # we must avoid a cyclic reference between repo and transaction.
2002 2001 reporef = weakref.ref(self)
2003 2002 def updater(tr):
2004 2003 repo = reporef()
2005 2004 repo.updatecaches(tr)
2006 2005 return updater
2007 2006
2008 2007 @unfilteredmethod
2009 2008 def updatecaches(self, tr=None, full=False):
2010 2009 """warm appropriate caches
2011 2010
2012 2011 If this function is called after a transaction closed, the transaction
2013 2012 will be available in the 'tr' argument. This can be used to selectively
2014 2013 update caches relevant to the changes in that transaction.
2015 2014
2016 2015 If 'full' is set, make sure all caches the function knows about have
2017 2016 up-to-date data, even the ones usually loaded more lazily.
2018 2017 """
2019 2018 if tr is not None and tr.hookargs.get('source') == 'strip':
2020 2019 # During strip, many caches are invalid but
2021 2020 # later call to `destroyed` will refresh them.
2022 2021 return
2023 2022
2024 2023 if tr is None or tr.changes['origrepolen'] < len(self):
2025 2024 # updating the unfiltered branchmap should refresh all the others,
2026 2025 self.ui.debug('updating the branch cache\n')
2027 2026 branchmap.updatecache(self.filtered('served'))
2028 2027
2029 2028 if full:
2030 2029 rbc = self.revbranchcache()
2031 2030 for r in self.changelog:
2032 2031 rbc.branchinfo(r)
2033 2032 rbc.write()
2034 2033
2035 2034 # ensure the working copy parents are in the manifestfulltextcache
2036 2035 for ctx in self['.'].parents():
2037 2036 ctx.manifest() # accessing the manifest is enough
2038 2037
2039 2038 def invalidatecaches(self):
2040 2039
2041 2040 if '_tagscache' in vars(self):
2042 2041 # can't use delattr on proxy
2043 2042 del self.__dict__['_tagscache']
2044 2043
2045 2044 self.unfiltered()._branchcaches.clear()
2046 2045 self.invalidatevolatilesets()
2047 2046 self._sparsesignaturecache.clear()
2048 2047
2049 2048 def invalidatevolatilesets(self):
2050 2049 self.filteredrevcache.clear()
2051 2050 obsolete.clearobscaches(self)
2052 2051
2053 2052 def invalidatedirstate(self):
2054 2053 '''Invalidates the dirstate, causing the next call to dirstate
2055 2054 to check if it was modified since the last time it was read,
2056 2055 rereading it if it has.
2057 2056
2058 2057 This is different from dirstate.invalidate() in that it doesn't always
2059 2058 reread the dirstate. Use dirstate.invalidate() if you want to
2060 2059 explicitly read the dirstate again (i.e. restoring it to a previous
2061 2060 known good state).'''
2062 2061 if hasunfilteredcache(self, 'dirstate'):
2063 2062 for k in self.dirstate._filecache:
2064 2063 try:
2065 2064 delattr(self.dirstate, k)
2066 2065 except AttributeError:
2067 2066 pass
2068 2067 delattr(self.unfiltered(), 'dirstate')
2069 2068
2070 2069 def invalidate(self, clearfilecache=False):
2071 2070 '''Invalidates both store and non-store parts other than dirstate
2072 2071
2073 2072 If a transaction is running, invalidation of store is omitted,
2074 2073 because discarding in-memory changes might cause inconsistency
2075 2074 (e.g. incomplete fncache causes unintentional failure, but
2076 2075 redundant one doesn't).
2077 2076 '''
2078 2077 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2079 2078 for k in list(self._filecache.keys()):
2080 2079 # dirstate is invalidated separately in invalidatedirstate()
2081 2080 if k == 'dirstate':
2082 2081 continue
2083 2082 if (k == 'changelog' and
2084 2083 self.currenttransaction() and
2085 2084 self.changelog._delayed):
2086 2085 # The changelog object may store unwritten revisions. We don't
2087 2086 # want to lose them.
2088 2087 # TODO: Solve the problem instead of working around it.
2089 2088 continue
2090 2089
2091 2090 if clearfilecache:
2092 2091 del self._filecache[k]
2093 2092 try:
2094 2093 delattr(unfiltered, k)
2095 2094 except AttributeError:
2096 2095 pass
2097 2096 self.invalidatecaches()
2098 2097 if not self.currenttransaction():
2099 2098 # TODO: Changing contents of store outside transaction
2100 2099 # causes inconsistency. We should make in-memory store
2101 2100 # changes detectable, and abort if changed.
2102 2101 self.store.invalidatecaches()
2103 2102
2104 2103 def invalidateall(self):
2105 2104 '''Fully invalidates both store and non-store parts, causing the
2106 2105 subsequent operation to reread any outside changes.'''
2107 2106 # extensions should hook this to invalidate their caches
2108 2107 self.invalidate()
2109 2108 self.invalidatedirstate()
2110 2109
2111 2110 @unfilteredmethod
2112 2111 def _refreshfilecachestats(self, tr):
2113 2112 """Reload stats of cached files so that they are flagged as valid"""
2114 2113 for k, ce in self._filecache.items():
2115 2114 k = pycompat.sysstr(k)
2116 2115 if k == r'dirstate' or k not in self.__dict__:
2117 2116 continue
2118 2117 ce.refresh()
2119 2118
2120 2119 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
2121 2120 inheritchecker=None, parentenvvar=None):
2122 2121 parentlock = None
2123 2122 # the contents of parentenvvar are used by the underlying lock to
2124 2123 # determine whether it can be inherited
2125 2124 if parentenvvar is not None:
2126 2125 parentlock = encoding.environ.get(parentenvvar)
2127 2126
2128 2127 timeout = 0
2129 2128 warntimeout = 0
2130 2129 if wait:
2131 2130 timeout = self.ui.configint("ui", "timeout")
2132 2131 warntimeout = self.ui.configint("ui", "timeout.warn")
2133 2132 # internal config: ui.signal-safe-lock
2134 2133 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
2135 2134
2136 2135 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
2137 2136 releasefn=releasefn,
2138 2137 acquirefn=acquirefn, desc=desc,
2139 2138 inheritchecker=inheritchecker,
2140 2139 parentlock=parentlock,
2141 2140 signalsafe=signalsafe)
2142 2141 return l
2143 2142
2144 2143 def _afterlock(self, callback):
2145 2144 """add a callback to be run when the repository is fully unlocked
2146 2145
2147 2146 The callback will be executed when the outermost lock is released
2148 2147 (with wlock being higher level than 'lock')."""
2149 2148 for ref in (self._wlockref, self._lockref):
2150 2149 l = ref and ref()
2151 2150 if l and l.held:
2152 2151 l.postrelease.append(callback)
2153 2152 break
2154 2153 else: # no lock has been found.
2155 2154 callback()
2156 2155
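    # Illustrative sketch, not part of the original file: registering a
    # callback with _afterlock(). It runs once the outermost lock is
    # released, or immediately when no lock is currently held.
    def _examplenotifyunlocked(self):
        def callback():
            self.ui.debug('all repository locks released\n')
        self._afterlock(callback)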
2157 2156 def lock(self, wait=True):
2158 2157 '''Lock the repository store (.hg/store) and return a weak reference
2159 2158 to the lock. Use this before modifying the store (e.g. committing or
2160 2159 stripping). If you are opening a transaction, get a lock as well.
2161 2160
2162 2161 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2163 2162 'wlock' first to avoid a dead-lock hazard.'''
2164 2163 l = self._currentlock(self._lockref)
2165 2164 if l is not None:
2166 2165 l.lock()
2167 2166 return l
2168 2167
2169 2168 l = self._lock(self.svfs, "lock", wait, None,
2170 2169 self.invalidate, _('repository %s') % self.origroot)
2171 2170 self._lockref = weakref.ref(l)
2172 2171 return l
2173 2172
2174 2173 def _wlockchecktransaction(self):
2175 2174 if self.currenttransaction() is not None:
2176 2175 raise error.LockInheritanceContractViolation(
2177 2176 'wlock cannot be inherited in the middle of a transaction')
2178 2177
2179 2178 def wlock(self, wait=True):
2180 2179 '''Lock the non-store parts of the repository (everything under
2181 2180 .hg except .hg/store) and return a weak reference to the lock.
2182 2181
2183 2182 Use this before modifying files in .hg.
2184 2183
2185 2184 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2186 2185 'wlock' first to avoid a dead-lock hazard.'''
2187 2186 l = self._wlockref and self._wlockref()
2188 2187 if l is not None and l.held:
2189 2188 l.lock()
2190 2189 return l
2191 2190
2192 2191 # We do not need to check for non-waiting lock acquisition. Such
2193 2192 # acquisition would not cause a dead-lock as it would just fail.
2194 2193 if wait and (self.ui.configbool('devel', 'all-warnings')
2195 2194 or self.ui.configbool('devel', 'check-locks')):
2196 2195 if self._currentlock(self._lockref) is not None:
2197 2196 self.ui.develwarn('"wlock" acquired after "lock"')
2198 2197
2199 2198 def unlock():
2200 2199 if self.dirstate.pendingparentchange():
2201 2200 self.dirstate.invalidate()
2202 2201 else:
2203 2202 self.dirstate.write(None)
2204 2203
2205 2204 self._filecache['dirstate'].refresh()
2206 2205
2207 2206 l = self._lock(self.vfs, "wlock", wait, unlock,
2208 2207 self.invalidatedirstate, _('working directory of %s') %
2209 2208 self.origroot,
2210 2209 inheritchecker=self._wlockchecktransaction,
2211 2210 parentenvvar='HG_WLOCK_LOCKER')
2212 2211 self._wlockref = weakref.ref(l)
2213 2212 return l
2214 2213
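    # Illustrative sketch, not part of the original file: the lock ordering
    # documented above -- always 'wlock' before 'lock' -- for an operation
    # touching both the working copy and the store.
    def _examplelockedoperation(self):
        with self.wlock():
            with self.lock():
                pass # modify the working copy and the store here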
2215 2214 def _currentlock(self, lockref):
2216 2215 """Returns the lock if it's held, or None if it's not."""
2217 2216 if lockref is None:
2218 2217 return None
2219 2218 l = lockref()
2220 2219 if l is None or not l.held:
2221 2220 return None
2222 2221 return l
2223 2222
2224 2223 def currentwlock(self):
2225 2224 """Returns the wlock if it's held, or None if it's not."""
2226 2225 return self._currentlock(self._wlockref)
2227 2226
2228 2227 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
2229 2228 """
2230 2229 commit an individual file as part of a larger transaction
2231 2230 """
2232 2231
2233 2232 fname = fctx.path()
2234 2233 fparent1 = manifest1.get(fname, nullid)
2235 2234 fparent2 = manifest2.get(fname, nullid)
2236 2235 if isinstance(fctx, context.filectx):
2237 2236 node = fctx.filenode()
2238 2237 if node in [fparent1, fparent2]:
2239 2238 self.ui.debug('reusing %s filelog entry\n' % fname)
2240 2239 if manifest1.flags(fname) != fctx.flags():
2241 2240 changelist.append(fname)
2242 2241 return node
2243 2242
2244 2243 flog = self.file(fname)
2245 2244 meta = {}
2246 2245 copy = fctx.renamed()
2247 2246 if copy and copy[0] != fname:
2248 2247 # Mark the new revision of this file as a copy of another
2249 2248 # file. This copy data will effectively act as a parent
2250 2249 # of this new revision. If this is a merge, the first
2251 2250 # parent will be the nullid (meaning "look up the copy data")
2252 2251 # and the second one will be the other parent. For example:
2253 2252 #
2254 2253 # 0 --- 1 --- 3 rev1 changes file foo
2255 2254 # \ / rev2 renames foo to bar and changes it
2256 2255 # \- 2 -/ rev3 should have bar with all changes and
2257 2256 # should record that bar descends from
2258 2257 # bar in rev2 and foo in rev1
2259 2258 #
2260 2259 # this allows this merge to succeed:
2261 2260 #
2262 2261 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
2263 2262 # \ / merging rev3 and rev4 should use bar@rev2
2264 2263 # \- 2 --- 4 as the merge base
2265 2264 #
2266 2265
2267 2266 cfname = copy[0]
2268 2267 crev = manifest1.get(cfname)
2269 2268 newfparent = fparent2
2270 2269
2271 2270 if manifest2: # branch merge
2272 2271 if fparent2 == nullid or crev is None: # copied on remote side
2273 2272 if cfname in manifest2:
2274 2273 crev = manifest2[cfname]
2275 2274 newfparent = fparent1
2276 2275
2277 2276 # Here, we used to search backwards through history to try to find
2278 2277 # where the file copy came from if the source of a copy was not in
2279 2278 # the parent directory. However, this doesn't actually make sense to
2280 2279 # do (what does a copy from something not in your working copy even
2281 2280 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
2282 2281 # the user that copy information was dropped, so if they didn't
2283 2282 # expect this outcome it can be fixed, but this is the correct
2284 2283 # behavior in this circumstance.
2285 2284
2286 2285 if crev:
2287 2286 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
2288 2287 meta["copy"] = cfname
2289 2288 meta["copyrev"] = hex(crev)
2290 2289 fparent1, fparent2 = nullid, newfparent
2291 2290 else:
2292 2291 self.ui.warn(_("warning: can't find ancestor for '%s' "
2293 2292 "copied from '%s'!\n") % (fname, cfname))
2294 2293
2295 2294 elif fparent1 == nullid:
2296 2295 fparent1, fparent2 = fparent2, nullid
2297 2296 elif fparent2 != nullid:
2298 2297 # is one parent an ancestor of the other?
2299 2298 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2300 2299 if fparent1 in fparentancestors:
2301 2300 fparent1, fparent2 = fparent2, nullid
2302 2301 elif fparent2 in fparentancestors:
2303 2302 fparent2 = nullid
2304 2303
2305 2304 # is the file changed?
2306 2305 text = fctx.data()
2307 2306 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
2308 2307 changelist.append(fname)
2309 2308 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2310 2309 # are just the flags changed during merge?
2311 2310 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2312 2311 changelist.append(fname)
2313 2312
2314 2313 return fparent1
2315 2314
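    # Illustrative sketch, not part of the original file: reading back the
    # copy information that _filecommit() records in the filelog metadata.
    # filectx.renamed() returns a (source path, source filenode) pair, or a
    # false value when the revision is not a copy.
    def _examplecopysource(self, fctx):
        copy = fctx.renamed()
        if copy:
            self.ui.debug('%s was copied from %s\n' % (fctx.path(), copy[0]))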
2316 2315 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
2317 2316 """check for commit arguments that aren't committable"""
2318 2317 if match.isexact() or match.prefix():
2319 2318 matched = set(status.modified + status.added + status.removed)
2320 2319
2321 2320 for f in match.files():
2322 2321 f = self.dirstate.normalize(f)
2323 2322 if f == '.' or f in matched or f in wctx.substate:
2324 2323 continue
2325 2324 if f in status.deleted:
2326 2325 fail(f, _('file not found!'))
2327 2326 if f in vdirs: # visited directory
2328 2327 d = f + '/'
2329 2328 for mf in matched:
2330 2329 if mf.startswith(d):
2331 2330 break
2332 2331 else:
2333 2332 fail(f, _("no match under directory!"))
2334 2333 elif f not in self.dirstate:
2335 2334 fail(f, _("file not tracked!"))
2336 2335
2337 2336 @unfilteredmethod
2338 2337 def commit(self, text="", user=None, date=None, match=None, force=False,
2339 2338 editor=False, extra=None):
2340 2339 """Add a new revision to current repository.
2341 2340
2342 2341 Revision information is gathered from the working directory,
2343 2342 match can be used to filter the committed files. If editor is
2344 2343 supplied, it is called to get a commit message.
2345 2344 """
2346 2345 if extra is None:
2347 2346 extra = {}
2348 2347
2349 2348 def fail(f, msg):
2350 2349 raise error.Abort('%s: %s' % (f, msg))
2351 2350
2352 2351 if not match:
2353 2352 match = matchmod.always(self.root, '')
2354 2353
2355 2354 if not force:
2356 2355 vdirs = []
2357 2356 match.explicitdir = vdirs.append
2358 2357 match.bad = fail
2359 2358
2360 2359 wlock = lock = tr = None
2361 2360 try:
2362 2361 wlock = self.wlock()
2363 2362 lock = self.lock() # for recent changelog (see issue4368)
2364 2363
2365 2364 wctx = self[None]
2366 2365 merge = len(wctx.parents()) > 1
2367 2366
2368 2367 if not force and merge and not match.always():
2369 2368 raise error.Abort(_('cannot partially commit a merge '
2370 2369 '(do not specify files or patterns)'))
2371 2370
2372 2371 status = self.status(match=match, clean=force)
2373 2372 if force:
2374 2373 status.modified.extend(status.clean) # mq may commit clean files
2375 2374
2376 2375 # check subrepos
2377 2376 subs, commitsubs, newstate = subrepoutil.precommit(
2378 2377 self.ui, wctx, status, match, force=force)
2379 2378
2380 2379 # make sure all explicit patterns are matched
2381 2380 if not force:
2382 2381 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
2383 2382
2384 2383 cctx = context.workingcommitctx(self, status,
2385 2384 text, user, date, extra)
2386 2385
2387 2386 # internal config: ui.allowemptycommit
2388 2387 allowemptycommit = (wctx.branch() != wctx.p1().branch()
2389 2388 or extra.get('close') or merge or cctx.files()
2390 2389 or self.ui.configbool('ui', 'allowemptycommit'))
2391 2390 if not allowemptycommit:
2392 2391 return None
2393 2392
2394 2393 if merge and cctx.deleted():
2395 2394 raise error.Abort(_("cannot commit merge with missing files"))
2396 2395
2397 2396 ms = mergemod.mergestate.read(self)
2398 2397 mergeutil.checkunresolved(ms)
2399 2398
2400 2399 if editor:
2401 2400 cctx._text = editor(self, cctx, subs)
2402 2401 edited = (text != cctx._text)
2403 2402
2404 2403 # Save commit message in case this transaction gets rolled back
2405 2404 # (e.g. by a pretxncommit hook). Leave the content alone on
2406 2405 # the assumption that the user will use the same editor again.
2407 2406 msgfn = self.savecommitmessage(cctx._text)
2408 2407
2409 2408 # commit subs and write new state
2410 2409 if subs:
2411 2410 for s in sorted(commitsubs):
2412 2411 sub = wctx.sub(s)
2413 2412 self.ui.status(_('committing subrepository %s\n') %
2414 2413 subrepoutil.subrelpath(sub))
2415 2414 sr = sub.commit(cctx._text, user, date)
2416 2415 newstate[s] = (newstate[s][0], sr)
2417 2416 subrepoutil.writestate(self, newstate)
2418 2417
2419 2418 p1, p2 = self.dirstate.parents()
2420 2419 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2421 2420 try:
2422 2421 self.hook("precommit", throw=True, parent1=hookp1,
2423 2422 parent2=hookp2)
2424 2423 tr = self.transaction('commit')
2425 2424 ret = self.commitctx(cctx, True)
2426 2425 except: # re-raises
2427 2426 if edited:
2428 2427 self.ui.write(
2429 2428 _('note: commit message saved in %s\n') % msgfn)
2430 2429 raise
2431 2430 # update bookmarks, dirstate and mergestate
2432 2431 bookmarks.update(self, [p1, p2], ret)
2433 2432 cctx.markcommitted(ret)
2434 2433 ms.reset()
2435 2434 tr.close()
2436 2435
2437 2436 finally:
2438 2437 lockmod.release(tr, lock, wlock)
2439 2438
2440 2439 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2441 2440 # hack for commands that use a temporary commit (e.g. histedit)
2442 2441 # temporary commit got stripped before hook release
2443 2442 if self.changelog.hasnode(ret):
2444 2443 self.hook("commit", node=node, parent1=parent1,
2445 2444 parent2=parent2)
2446 2445 self._afterlock(commithook)
2447 2446 return ret
2448 2447
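    # Illustrative sketch, not part of the original file: the typical
    # high-level call into commit(). With no match every changed file is
    # committed, and None is returned when there is nothing to commit.
    def _examplecommitall(self, message):
        node = self.commit(text=message)
        if node is None:
            self.ui.status(_('nothing changed\n'))
        return node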
2449 2448 @unfilteredmethod
2450 2449 def commitctx(self, ctx, error=False):
2451 2450 """Add a new revision to current repository.
2452 2451 Revision information is passed via the context argument.
2453 2452
2454 2453 ctx.files() should list all files involved in this commit, i.e.
2455 2454 modified/added/removed files. On merge, it may be wider than the
2456 2455 ctx.files() to be committed, since any file nodes derived directly
2457 2456 from p1 or p2 are excluded from the committed ctx.files().
2458 2457 """
2459 2458
2460 2459 tr = None
2461 2460 p1, p2 = ctx.p1(), ctx.p2()
2462 2461 user = ctx.user()
2463 2462
2464 2463 lock = self.lock()
2465 2464 try:
2466 2465 tr = self.transaction("commit")
2467 2466 trp = weakref.proxy(tr)
2468 2467
2469 2468 if ctx.manifestnode():
2470 2469 # reuse an existing manifest revision
2471 2470 self.ui.debug('reusing known manifest\n')
2472 2471 mn = ctx.manifestnode()
2473 2472 files = ctx.files()
2474 2473 elif ctx.files():
2475 2474 m1ctx = p1.manifestctx()
2476 2475 m2ctx = p2.manifestctx()
2477 2476 mctx = m1ctx.copy()
2478 2477
2479 2478 m = mctx.read()
2480 2479 m1 = m1ctx.read()
2481 2480 m2 = m2ctx.read()
2482 2481
2483 2482 # check in files
2484 2483 added = []
2485 2484 changed = []
2486 2485 removed = list(ctx.removed())
2487 2486 linkrev = len(self)
2488 2487 self.ui.note(_("committing files:\n"))
2489 2488 for f in sorted(ctx.modified() + ctx.added()):
2490 2489 self.ui.note(f + "\n")
2491 2490 try:
2492 2491 fctx = ctx[f]
2493 2492 if fctx is None:
2494 2493 removed.append(f)
2495 2494 else:
2496 2495 added.append(f)
2497 2496 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2498 2497 trp, changed)
2499 2498 m.setflag(f, fctx.flags())
2500 2499 except OSError as inst:
2501 2500 self.ui.warn(_("trouble committing %s!\n") % f)
2502 2501 raise
2503 2502 except IOError as inst:
2504 2503 errcode = getattr(inst, 'errno', errno.ENOENT)
2505 2504 if error or errcode and errcode != errno.ENOENT:
2506 2505 self.ui.warn(_("trouble committing %s!\n") % f)
2507 2506 raise
2508 2507
2509 2508 # update manifest
2510 2509 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2511 2510 drop = [f for f in removed if f in m]
2512 2511 for f in drop:
2513 2512 del m[f]
2514 2513 files = changed + removed
2515 2514 md = None
2516 2515 if not files:
2517 2516 # if no "files" actually changed in terms of the changelog,
2518 2517 # try hard to detect an unmodified manifest entry so that the
2519 2518 # exact same commit can be reproduced later on convert.
2520 2519 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2521 2520 if not files and md:
2522 2521 self.ui.debug('not reusing manifest (no file change in '
2523 2522 'changelog, but manifest differs)\n')
2524 2523 if files or md:
2525 2524 self.ui.note(_("committing manifest\n"))
2526 2525 # we're using narrowmatch here since it's already applied at
2527 2526 # other stages (such as dirstate.walk), so we're already
2528 2527 # ignoring things outside of narrowspec in most cases. The
2529 2528 # one case where we might have files outside the narrowspec
2530 2529 # at this point is merges, and we already error out in the
2531 2530 # case where the merge has files outside of the narrowspec,
2532 2531 # so this is safe.
2533 2532 mn = mctx.write(trp, linkrev,
2534 2533 p1.manifestnode(), p2.manifestnode(),
2535 2534 added, drop, match=self.narrowmatch())
2536 2535 else:
2537 2536 self.ui.debug('reusing manifest from p1 (listed files
2538 2537 'actually unchanged)\n')
2539 2538 mn = p1.manifestnode()
2540 2539 else:
2541 2540 self.ui.debug('reusing manifest from p1 (no file change)\n')
2542 2541 mn = p1.manifestnode()
2543 2542 files = []
2544 2543
2545 2544 # update changelog
2546 2545 self.ui.note(_("committing changelog\n"))
2547 2546 self.changelog.delayupdate(tr)
2548 2547 n = self.changelog.add(mn, files, ctx.description(),
2549 2548 trp, p1.node(), p2.node(),
2550 2549 user, ctx.date(), ctx.extra().copy())
2551 2550 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2552 2551 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2553 2552 parent2=xp2)
2554 2553 # set the new commit in its proper phase
2555 2554 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2556 2555 if targetphase:
2557 2556 # retracting the boundary does not alter the parent changeset.
2558 2557 # if a parent has a higher phase, the resulting phase will
2559 2558 # be compliant anyway
2560 2559 #
2561 2560 # if minimal phase was 0 we don't need to retract anything
2562 2561 phases.registernew(self, tr, targetphase, [n])
2563 2562 tr.close()
2564 2563 return n
2565 2564 finally:
2566 2565 if tr:
2567 2566 tr.release()
2568 2567 lock.release()
2569 2568
2570 2569 @unfilteredmethod
2571 2570 def destroying(self):
2572 2571 '''Inform the repository that nodes are about to be destroyed.
2573 2572 Intended for use by strip and rollback, so there's a common
2574 2573 place for anything that has to be done before destroying history.
2575 2574
2576 2575 This is mostly useful for saving state that is in memory and waiting
2577 2576 to be flushed when the current lock is released. Because a call to
2578 2577 destroyed is imminent, the repo will be invalidated causing those
2579 2578 changes to stay in memory (waiting for the next unlock), or vanish
2580 2579 completely.
2581 2580 '''
2582 2581 # When using the same lock to commit and strip, the phasecache is left
2583 2582 # dirty after committing. Then when we strip, the repo is invalidated,
2584 2583 # causing those changes to disappear.
2585 2584 if '_phasecache' in vars(self):
2586 2585 self._phasecache.write()
2587 2586
2588 2587 @unfilteredmethod
2589 2588 def destroyed(self):
2590 2589 '''Inform the repository that nodes have been destroyed.
2591 2590 Intended for use by strip and rollback, so there's a common
2592 2591 place for anything that has to be done after destroying history.
2593 2592 '''
2594 2593 # When one tries to:
2595 2594 # 1) destroy nodes thus calling this method (e.g. strip)
2596 2595 # 2) use phasecache somewhere (e.g. commit)
2597 2596 #
2598 2597 # then 2) will fail because the phasecache contains nodes that were
2599 2598 # removed. We can either remove phasecache from the filecache,
2600 2599 # causing it to reload next time it is accessed, or simply filter
2601 2600 # the removed nodes now and write the updated cache.
2602 2601 self._phasecache.filterunknown(self)
2603 2602 self._phasecache.write()
2604 2603
2605 2604 # refresh all repository caches
2606 2605 self.updatecaches()
2607 2606
2608 2607 # Ensure the persistent tag cache is updated. Doing it now
2609 2608 # means that the tag cache only has to worry about destroyed
2610 2609 # heads immediately after a strip/rollback. That in turn
2611 2610 # guarantees that "cachetip == currenttip" (comparing both rev
2612 2611 # and node) always means no nodes have been added or destroyed.
2613 2612
2614 2613 # XXX this is suboptimal when qrefresh'ing: we strip the current
2615 2614 # head, refresh the tag cache, then immediately add a new head.
2616 2615 # But I think doing it this way is necessary for the "instant
2617 2616 # tag cache retrieval" case to work.
2618 2617 self.invalidate()
2619 2618
2620 2619 def status(self, node1='.', node2=None, match=None,
2621 2620 ignored=False, clean=False, unknown=False,
2622 2621 listsubrepos=False):
2623 2622 '''a convenience method that calls node1.status(node2)'''
2624 2623 return self[node1].status(node2, match, ignored, clean, unknown,
2625 2624 listsubrepos)
2626 2625
2627 2626 def addpostdsstatus(self, ps):
2628 2627 """Add a callback to run within the wlock, at the point at which status
2629 2628 fixups happen.
2630 2629
2631 2630 On status completion, callback(wctx, status) will be called with the
2632 2631 wlock held, unless the dirstate has changed from underneath or the wlock
2633 2632 couldn't be grabbed.
2634 2633
2635 2634 Callbacks should not capture and use a cached copy of the dirstate --
2636 2635 it might change in the meanwhile. Instead, they should access the
2637 2636 dirstate via wctx.repo().dirstate.
2638 2637
2639 2638 This list is emptied out after each status run -- extensions should
2640 2639 make sure they add to this list each time dirstate.status is called.
2641 2640 Extensions should also make sure they don't call this for statuses
2642 2641 that don't involve the dirstate.
2643 2642 """
2644 2643
2645 2644 # The list is located here for uniqueness reasons -- it is actually
2646 2645 # managed by the workingctx, but that isn't unique per-repo.
2647 2646 self._postdsstatus.append(ps)
2648 2647
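    # Illustrative sketch, not part of the original file: registering a
    # post-dirstate-status callback. Per the docstring above, the dirstate is
    # reached through wctx.repo() instead of being captured by the closure.
    def _exampleregisterfixup(self):
        def fixup(wctx, status):
            wctx.repo().ui.debug('%d files modified\n' % len(status.modified))
        self.addpostdsstatus(fixup)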
2649 2648 def postdsstatus(self):
2650 2649 """Used by workingctx to get the list of post-dirstate-status hooks."""
2651 2650 return self._postdsstatus
2652 2651
2653 2652 def clearpostdsstatus(self):
2654 2653 """Used by workingctx to clear post-dirstate-status hooks."""
2655 2654 del self._postdsstatus[:]
2656 2655
2657 2656 def heads(self, start=None):
2658 2657 if start is None:
2659 2658 cl = self.changelog
2660 2659 headrevs = reversed(cl.headrevs())
2661 2660 return [cl.node(rev) for rev in headrevs]
2662 2661
2663 2662 heads = self.changelog.heads(start)
2664 2663 # sort the output in rev descending order
2665 2664 return sorted(heads, key=self.changelog.rev, reverse=True)
2666 2665
2667 2666 def branchheads(self, branch=None, start=None, closed=False):
2668 2667 '''return a (possibly filtered) list of heads for the given branch
2669 2668
2670 2669 Heads are returned in topological order, from newest to oldest.
2671 2670 If branch is None, use the dirstate branch.
2672 2671 If start is not None, return only heads reachable from start.
2673 2672 If closed is True, return heads that are marked as closed as well.
2674 2673 '''
2675 2674 if branch is None:
2676 2675 branch = self[None].branch()
2677 2676 branches = self.branchmap()
2678 2677 if branch not in branches:
2679 2678 return []
2680 2679 # the cache returns heads ordered lowest to highest
2681 2680 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2682 2681 if start is not None:
2683 2682 # filter out the heads that cannot be reached from startrev
2684 2683 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2685 2684 bheads = [h for h in bheads if h in fbheads]
2686 2685 return bheads
2687 2686
2688 2687 def branches(self, nodes):
2689 2688 if not nodes:
2690 2689 nodes = [self.changelog.tip()]
2691 2690 b = []
2692 2691 for n in nodes:
2693 2692 t = n
2694 2693 while True:
2695 2694 p = self.changelog.parents(n)
2696 2695 if p[1] != nullid or p[0] == nullid:
2697 2696 b.append((t, n, p[0], p[1]))
2698 2697 break
2699 2698 n = p[0]
2700 2699 return b
2701 2700
2702 2701 def between(self, pairs):
2703 2702 r = []
2704 2703
2705 2704 for top, bottom in pairs:
2706 2705 n, l, i = top, [], 0
2707 2706 f = 1
2708 2707
2709 2708 while n != bottom and n != nullid:
2710 2709 p = self.changelog.parents(n)[0]
2711 2710 if i == f:
2712 2711 l.append(n)
2713 2712 f = f * 2
2714 2713 n = p
2715 2714 i += 1
2716 2715
2717 2716 r.append(l)
2718 2717
2719 2718 return r
2720 2719
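    # Illustrative sketch, not part of the original file: the exponential
    # sampling performed by between() above, run on a plain list standing in
    # for the first-parent chain from top (index 0) toward bottom. It keeps
    # the entries at distances 1, 2, 4, 8, ... from the top (termination at
    # 'bottom'/nullid is elided for brevity).
    def _examplebetweensample(self, chain):
        sample = []
        step = 1
        for distance, node in enumerate(chain):
            if distance == step:
                sample.append(node)
                step *= 2
        return sample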
2721 2720 def checkpush(self, pushop):
2722 2721 """Extensions can override this function if additional checks have
2723 2722 to be performed before pushing, or call it if they override push
2724 2723 command.
2725 2724 """
2726 2725
2727 2726 @unfilteredpropertycache
2728 2727 def prepushoutgoinghooks(self):
2729 2728 """Return util.hooks consists of a pushop with repo, remote, outgoing
2730 2729 methods, which are called before pushing changesets.
2731 2730 """
2732 2731 return util.hooks()
2733 2732
2734 2733 def pushkey(self, namespace, key, old, new):
2735 2734 try:
2736 2735 tr = self.currenttransaction()
2737 2736 hookargs = {}
2738 2737 if tr is not None:
2739 2738 hookargs.update(tr.hookargs)
2740 2739 hookargs = pycompat.strkwargs(hookargs)
2741 2740 hookargs[r'namespace'] = namespace
2742 2741 hookargs[r'key'] = key
2743 2742 hookargs[r'old'] = old
2744 2743 hookargs[r'new'] = new
2745 2744 self.hook('prepushkey', throw=True, **hookargs)
2746 2745 except error.HookAbort as exc:
2747 2746 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2748 2747 if exc.hint:
2749 2748 self.ui.write_err(_("(%s)\n") % exc.hint)
2750 2749 return False
2751 2750 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2752 2751 ret = pushkey.push(self, namespace, key, old, new)
2753 2752 def runhook():
2754 2753 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2755 2754 ret=ret)
2756 2755 self._afterlock(runhook)
2757 2756 return ret
2758 2757
2759 2758 def listkeys(self, namespace):
2760 2759 self.hook('prelistkeys', throw=True, namespace=namespace)
2761 2760 self.ui.debug('listing keys for "%s"\n' % namespace)
2762 2761 values = pushkey.list(self, namespace)
2763 2762 self.hook('listkeys', namespace=namespace, values=values)
2764 2763 return values
2765 2764
2766 2765 def debugwireargs(self, one, two, three=None, four=None, five=None):
2767 2766 '''used to test argument passing over the wire'''
2768 2767 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2769 2768 pycompat.bytestr(four),
2770 2769 pycompat.bytestr(five))
2771 2770
2772 2771 def savecommitmessage(self, text):
2773 2772 fp = self.vfs('last-message.txt', 'wb')
2774 2773 try:
2775 2774 fp.write(text)
2776 2775 finally:
2777 2776 fp.close()
2778 2777 return self.pathto(fp.name[len(self.root) + 1:])
2779 2778
2780 2779 # used to avoid circular references so destructors work
2781 2780 def aftertrans(files):
2782 2781 renamefiles = [tuple(t) for t in files]
2783 2782 def a():
2784 2783 for vfs, src, dest in renamefiles:
2785 2784 # if src and dest refer to the same file, vfs.rename is a no-op,
2786 2785 # leaving both src and dest on disk. delete dest to make sure
2787 2786 # the rename couldn't be such a no-op.
2788 2787 vfs.tryunlink(dest)
2789 2788 try:
2790 2789 vfs.rename(src, dest)
2791 2790 except OSError: # journal file does not yet exist
2792 2791 pass
2793 2792 return a
2794 2793
2795 2794 def undoname(fn):
2796 2795 base, name = os.path.split(fn)
2797 2796 assert name.startswith('journal')
2798 2797 return os.path.join(base, name.replace('journal', 'undo', 1))
2799 2798
2800 2799 def instance(ui, path, create, intents=None, createopts=None):
2801 2800 localpath = util.urllocalpath(path)
2802 2801 if create:
2803 2802 createrepository(ui, localpath, createopts=createopts)
2804 2803
2805 2804 return makelocalrepository(ui, localpath, intents=intents)
2806 2805
2807 2806 def islocal(path):
2808 2807 return True
2809 2808
2810 2809 def newreporequirements(ui, createopts=None):
2811 2810 """Determine the set of requirements for a new local repository.
2812 2811
2813 2812 Extensions can wrap this function to specify custom requirements for
2814 2813 new repositories.
2815 2814 """
2816 2815 createopts = createopts or {}
2817 2816
2818 2817 # If the repo is being created from a shared repository, we copy
2819 2818 # its requirements.
2820 2819 if 'sharedrepo' in createopts:
2821 2820 requirements = set(createopts['sharedrepo'].requirements)
2822 2821 if createopts.get('sharedrelative'):
2823 2822 requirements.add('relshared')
2824 2823 else:
2825 2824 requirements.add('shared')
2826 2825
2827 2826 return requirements
2828 2827
2829 2828 requirements = {'revlogv1'}
2830 2829 if ui.configbool('format', 'usestore'):
2831 2830 requirements.add('store')
2832 2831 if ui.configbool('format', 'usefncache'):
2833 2832 requirements.add('fncache')
2834 2833 if ui.configbool('format', 'dotencode'):
2835 2834 requirements.add('dotencode')
2836 2835
2837 2836 compengine = ui.config('experimental', 'format.compression')
2838 2837 if compengine not in util.compengines:
2839 2838 raise error.Abort(_('compression engine %s defined by '
2840 2839 'experimental.format.compression not available') %
2841 2840 compengine,
2842 2841 hint=_('run "hg debuginstall" to list available '
2843 2842 'compression engines'))
2844 2843
2845 2844 # zlib is the historical default and doesn't need an explicit requirement.
2846 2845 if compengine != 'zlib':
2847 2846 requirements.add('exp-compression-%s' % compengine)
2848 2847
2849 2848 if scmutil.gdinitconfig(ui):
2850 2849 requirements.add('generaldelta')
2851 2850 if ui.configbool('experimental', 'treemanifest'):
2852 2851 requirements.add('treemanifest')
2853 2852 # experimental config: format.sparse-revlog
2854 2853 if ui.configbool('format', 'sparse-revlog'):
2855 2854 requirements.add(SPARSEREVLOG_REQUIREMENT)
2856 2855
2857 2856 revlogv2 = ui.config('experimental', 'revlogv2')
2858 2857 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2859 2858 requirements.remove('revlogv1')
2860 2859 # generaldelta is implied by revlogv2.
2861 2860 requirements.discard('generaldelta')
2862 2861 requirements.add(REVLOGV2_REQUIREMENT)
2863 2862 # experimental config: format.internal-phase
2864 2863 if ui.configbool('format', 'internal-phase'):
2865 2864 requirements.add('internal-phase')
2866 2865
2867 2866 if createopts.get('narrowfiles'):
2868 2867 requirements.add(repository.NARROW_REQUIREMENT)
2869 2868
2870 2869 return requirements
2871 2870
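# Illustrative sketch, not part of the original file: an extension could wrap
# newreporequirements() (e.g. via extensions.wrapfunction) to add its own
# requirement to new repositories. 'exp-myfeature' is a hypothetical name.
def _examplewrapnewreporequirements(orig, ui, createopts=None):
    requirements = orig(ui, createopts=createopts)
    requirements.add('exp-myfeature')
    return requirements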
2872 2871 def filterknowncreateopts(ui, createopts):
2873 2872 """Filters a dict of repo creation options against options that are known.
2874 2873
2875 2874 Receives a dict of repo creation options and returns a dict of those
2876 2875 options that we don't know how to handle.
2877 2876
2878 2877 This function is called as part of repository creation. If the
2879 2878 returned dict contains any items, repository creation will not
2880 2879 be allowed, as it means there was a request to create a repository
2881 2880 with options not recognized by loaded code.
2882 2881
2883 2882 Extensions can wrap this function to filter out creation options
2884 2883 they know how to handle.
2885 2884 """
2886 2885 known = {
2887 2886 'narrowfiles',
2888 2887 'sharedrepo',
2889 2888 'sharedrelative',
2890 2889 'shareditems',
2891 2890 }
2892 2891
2893 2892 return {k: v for k, v in createopts.items() if k not in known}
2894 2893
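# Illustrative sketch, not part of the original file: a matching wrapper for
# filterknowncreateopts(), claiming a hypothetical 'myfeature' creation option
# so repository creation is allowed to proceed with it.
def _examplefiltercreateopts(orig, ui, createopts):
    unknown = orig(ui, createopts)
    unknown.pop('myfeature', None)
    return unknown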
2895 2894 def createrepository(ui, path, createopts=None):
2896 2895 """Create a new repository in a vfs.
2897 2896
2898 2897 ``path`` path to the new repo's working directory.
2899 2898 ``createopts`` options for the new repository.
2900 2899
2901 2900 The following keys for ``createopts`` are recognized:
2902 2901
2903 2902 narrowfiles
2904 2903 Set up repository to support narrow file storage.
2905 2904 sharedrepo
2906 2905 Repository object from which storage should be shared.
2907 2906 sharedrelative
2908 2907 Boolean indicating if the path to the shared repo should be
2909 2908 stored as relative. By default, the pointer to the "parent" repo
2910 2909 is stored as an absolute path.
2911 2910 shareditems
2912 2911 Set of items to share to the new repository (in addition to storage).
2913 2912 """
2914 2913 createopts = createopts or {}
2915 2914
2916 2915 unknownopts = filterknowncreateopts(ui, createopts)
2917 2916
2918 2917 if not isinstance(unknownopts, dict):
2919 2918 raise error.ProgrammingError('filterknowncreateopts() did not return '
2920 2919 'a dict')
2921 2920
2922 2921 if unknownopts:
2923 2922 raise error.Abort(_('unable to create repository because of unknown '
2924 2923 'creation option: %s') %
2925 2924 ', '.join(sorted(unknownopts)),
2926 2925 hint=_('is a required extension not loaded?'))
2927 2926
2928 2927 requirements = newreporequirements(ui, createopts=createopts)
2929 2928
2930 2929 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
2931 2930
2932 2931 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
2933 2932 if hgvfs.exists():
2934 2933 raise error.RepoError(_('repository %s already exists') % path)
2935 2934
2936 2935 if 'sharedrepo' in createopts:
2937 2936 sharedpath = createopts['sharedrepo'].sharedpath
2938 2937
2939 2938 if createopts.get('sharedrelative'):
2940 2939 try:
2941 2940 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
2942 2941 except (IOError, ValueError) as e:
2943 2942 # ValueError is raised on Windows if the drive letters differ
2944 2943 # on each path.
2945 2944 raise error.Abort(_('cannot calculate relative path'),
2946 2945 hint=stringutil.forcebytestr(e))
2947 2946
2948 2947 if not wdirvfs.exists():
2949 2948 wdirvfs.makedirs()
2950 2949
2951 2950 hgvfs.makedir(notindexed=True)
2952 2951
2953 2952 if b'store' in requirements and 'sharedrepo' not in createopts:
2954 2953 hgvfs.mkdir(b'store')
2955 2954
2956 2955 # We create an invalid changelog outside the store so very old
2957 2956 # Mercurial versions (which didn't know about the requirements
2958 2957 # file) encounter an error on reading the changelog. This
2959 2958 # effectively locks out old clients and prevents them from
2960 2959 # mucking with a repo in an unknown format.
2961 2960 #
2962 2961 # The revlog header has version 2, which won't be recognized by
2963 2962 # such old clients.
2964 2963 hgvfs.append(b'00changelog.i',
2965 2964 b'\0\0\0\2 dummy changelog to prevent using the old repo '
2966 2965 b'layout')
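    # Illustrative note, not part of the original file: those four leading
    # bytes are the big-endian revlog header word -- struct.unpack('>I',
    # b'\0\0\0\2') yields 2, i.e. flags 0 in the high 16 bits and revlog
    # version 2 in the low 16, which pre-v2 clients refuse to read.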
2967 2966
2968 2967 scmutil.writerequires(hgvfs, requirements)
2969 2968
2970 2969 # Write out file telling readers where to find the shared store.
2971 2970 if 'sharedrepo' in createopts:
2972 2971 hgvfs.write(b'sharedpath', sharedpath)
2973 2972
2974 2973 if createopts.get('shareditems'):
2975 2974 shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
2976 2975 hgvfs.write(b'shared', shared)
2977 2976
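# Illustrative sketch, not part of the original file: creating a repository
# with one of the createopts keys documented above and then opening it.
def _examplecreatenarrowrepo(ui, path):
    createrepository(ui, path, createopts={'narrowfiles': True})
    return makelocalrepository(ui, util.urllocalpath(path))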
2978 2977 def poisonrepository(repo):
2979 2978 """Poison a repository instance so it can no longer be used."""
2980 2979 # Perform any cleanup on the instance.
2981 2980 repo.close()
2982 2981
2983 2982 # Our strategy is to replace the type of the object with one that
2984 2983 # has all attribute lookups result in error.
2985 2984 #
2986 2985 # But we have to allow the close() method because some constructors
2987 2986 # of repos call close() on repo references.
2988 2987 class poisonedrepository(object):
2989 2988 def __getattribute__(self, item):
2990 2989 if item == r'close':
2991 2990 return object.__getattribute__(self, item)
2992 2991
2993 2992 raise error.ProgrammingError('repo instances should not be used '
2994 2993 'after unshare')
2995 2994
2996 2995 def close(self):
2997 2996 pass
2998 2997
2999 2998 # We may have a repoview, which intercepts __setattr__. So be sure
3000 2999 # we operate at the lowest level possible.
3001 3000 object.__setattr__(repo, r'__class__', poisonedrepository)
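# Illustrative sketch, not part of the original file: unshare-style code could
# swap in a fresh instance and poison the old object so that any stale
# reference fails loudly instead of silently touching the old repository.
def _examplereplacerepo(ui, repo):
    newrepo = instance(ui, repo.root, create=False)
    poisonrepository(repo)
    return newrepo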
@@ -1,580 +1,579 b''
1 1 $ . "$TESTDIR/histedit-helpers.sh"
2 2
3 3 Enable obsolete
4 4
5 5 $ cat >> $HGRCPATH << EOF
6 6 > [ui]
7 7 > logtemplate= {rev}:{node|short} {desc|firstline}
8 8 > [phases]
9 9 > publish=False
10 10 > [experimental]
11 11 > evolution.createmarkers=True
12 12 > evolution.allowunstable=True
13 13 > [extensions]
14 14 > histedit=
15 15 > rebase=
16 16 > EOF
17 17
18 18 Test that histedit learns about obsolescence not stored in histedit state
19 19 $ hg init boo
20 20 $ cd boo
21 21 $ echo a > a
22 22 $ hg ci -Am a
23 23 adding a
24 24 $ echo a > b
25 25 $ echo a > c
26 26 $ echo a > c
27 27 $ hg ci -Am b
28 28 adding b
29 29 adding c
30 30 $ echo a > d
31 31 $ hg ci -Am c
32 32 adding d
33 33 $ echo "pick `hg log -r 0 -T '{node|short}'`" > plan
34 34 $ echo "pick `hg log -r 2 -T '{node|short}'`" >> plan
35 35 $ echo "edit `hg log -r 1 -T '{node|short}'`" >> plan
36 36 $ hg histedit -r 'all()' --commands plan
37 37 Editing (1b2d564fad96), you may commit or record as needed now.
38 38 (hg histedit --continue to resume)
39 39 [1]
40 40 $ hg st
41 41 A b
42 42 A c
43 43 ? plan
44 44 $ hg commit --amend b
45 45 $ hg histedit --continue
46 46 $ hg log -G
47 47 @ 5:46abc7c4d873 b
48 48 |
49 49 o 4:49d44ab2be1b c
50 50 |
51 51 o 0:cb9a9f314b8b a
52 52
53 53 $ hg debugobsolete
54 54 e72d22b19f8ecf4150ab4f91d0973fd9955d3ddf 49d44ab2be1b67a79127568a67c9c99430633b48 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '8', 'operation': 'amend', 'user': 'test'}
55 55 1b2d564fad96311b45362f17c2aa855150efb35f 46abc7c4d8738e8563e577f7889e1b6db3da4199 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '12', 'operation': 'histedit', 'user': 'test'}
56 56 114f4176969ef342759a8a57e6bccefc4234829b 49d44ab2be1b67a79127568a67c9c99430633b48 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '12', 'operation': 'histedit', 'user': 'test'}
57 57
58 58 With some node gone missing during the edit.
59 59
60 60 $ echo "pick `hg log -r 0 -T '{node|short}'`" > plan
61 61 $ echo "pick `hg log -r 5 -T '{node|short}'`" >> plan
62 62 $ echo "edit `hg log -r 4 -T '{node|short}'`" >> plan
63 63 $ hg histedit -r 'all()' --commands plan
64 64 Editing (49d44ab2be1b), you may commit or record as needed now.
65 65 (hg histedit --continue to resume)
66 66 [1]
67 67 $ hg st
68 68 A b
69 69 A d
70 70 ? plan
71 71 $ hg commit --amend -X . -m XXXXXX
72 72 $ hg commit --amend -X . -m b2
73 73 $ hg --hidden --config extensions.strip= strip 'desc(XXXXXX)' --no-backup
74 warning: ignoring unknown working parent aba7da937030!
75 74 $ hg histedit --continue
76 75 $ hg log -G
77 76 @ 8:273c1f3b8626 c
78 77 |
79 78 o 7:aba7da937030 b2
80 79 |
81 80 o 0:cb9a9f314b8b a
82 81
83 82 $ hg debugobsolete
84 83 e72d22b19f8ecf4150ab4f91d0973fd9955d3ddf 49d44ab2be1b67a79127568a67c9c99430633b48 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '8', 'operation': 'amend', 'user': 'test'}
85 84 1b2d564fad96311b45362f17c2aa855150efb35f 46abc7c4d8738e8563e577f7889e1b6db3da4199 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '12', 'operation': 'histedit', 'user': 'test'}
86 85 114f4176969ef342759a8a57e6bccefc4234829b 49d44ab2be1b67a79127568a67c9c99430633b48 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '12', 'operation': 'histedit', 'user': 'test'}
87 86 76f72745eac0643d16530e56e2f86e36e40631f1 2ca853e48edbd6453a0674dc0fe28a0974c51b9c 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '1', 'operation': 'amend', 'user': 'test'}
88 87 2ca853e48edbd6453a0674dc0fe28a0974c51b9c aba7da93703075eec9fb1dbaf143ff2bc1c49d46 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '1', 'operation': 'amend', 'user': 'test'}
89 88 49d44ab2be1b67a79127568a67c9c99430633b48 273c1f3b86267ed3ec684bb13af1fa4d6ba56e02 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '4', 'operation': 'histedit', 'user': 'test'}
90 89 46abc7c4d8738e8563e577f7889e1b6db3da4199 aba7da93703075eec9fb1dbaf143ff2bc1c49d46 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '5', 'operation': 'histedit', 'user': 'test'}
91 90 $ cd ..
92 91
93 92 Base setup for the rest of the testing
94 93 ======================================
95 94
96 95 $ hg init base
97 96 $ cd base
98 97
99 98 $ for x in a b c d e f ; do
100 99 > echo $x > $x
101 100 > hg add $x
102 101 > hg ci -m $x
103 102 > done
104 103
105 104 $ hg log --graph
106 105 @ 5:652413bf663e f
107 106 |
108 107 o 4:e860deea161a e
109 108 |
110 109 o 3:055a42cdd887 d
111 110 |
112 111 o 2:177f92b77385 c
113 112 |
114 113 o 1:d2ae7f538514 b
115 114 |
116 115 o 0:cb9a9f314b8b a
117 116
118 117
119 118 $ HGEDITOR=cat hg histedit 1
120 119 pick d2ae7f538514 1 b
121 120 pick 177f92b77385 2 c
122 121 pick 055a42cdd887 3 d
123 122 pick e860deea161a 4 e
124 123 pick 652413bf663e 5 f
125 124
126 125 # Edit history between d2ae7f538514 and 652413bf663e
127 126 #
128 127 # Commits are listed from least to most recent
129 128 #
130 129 # You can reorder changesets by reordering the lines
131 130 #
132 131 # Commands:
133 132 #
134 133 # e, edit = use commit, but stop for amending
135 134 # m, mess = edit commit message without changing commit content
136 135 # p, pick = use commit
137 136 # b, base = checkout changeset and apply further changesets from there
138 137 # d, drop = remove commit from history
139 138 # f, fold = use commit, but combine it with the one above
140 139 # r, roll = like fold, but discard this commit's description and date
141 140 #
142 141 $ hg histedit 1 --commands - --verbose <<EOF | grep histedit
143 142 > pick 177f92b77385 2 c
144 143 > drop d2ae7f538514 1 b
145 144 > pick 055a42cdd887 3 d
146 145 > fold e860deea161a 4 e
147 146 > pick 652413bf663e 5 f
148 147 > EOF
149 148 [1]
150 149 $ hg log --graph --hidden
151 150 @ 10:cacdfd884a93 f
152 151 |
153 152 o 9:59d9f330561f d
154 153 |
155 154 | x 8:b558abc46d09 fold-temp-revision e860deea161a
156 155 | |
157 156 | x 7:96e494a2d553 d
158 157 |/
159 158 o 6:b346ab9a313d c
160 159 |
161 160 | x 5:652413bf663e f
162 161 | |
163 162 | x 4:e860deea161a e
164 163 | |
165 164 | x 3:055a42cdd887 d
166 165 | |
167 166 | x 2:177f92b77385 c
168 167 | |
169 168 | x 1:d2ae7f538514 b
170 169 |/
171 170 o 0:cb9a9f314b8b a
172 171
173 172 $ hg debugobsolete
174 173 d2ae7f538514cd87c17547b0de4cea71fe1af9fb 0 {cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b} (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '0', 'operation': 'histedit', 'user': 'test'}
175 174 177f92b773850b59254aa5e923436f921b55483b b346ab9a313db8537ecf96fca3ca3ca984ef3bd7 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '4', 'operation': 'histedit', 'user': 'test'}
176 175 055a42cdd88768532f9cf79daa407fc8d138de9b 59d9f330561fd6c88b1a6b32f0e45034d88db784 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '13', 'operation': 'histedit', 'user': 'test'}
177 176 e860deea161a2f77de56603b340ebbb4536308ae 59d9f330561fd6c88b1a6b32f0e45034d88db784 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '13', 'operation': 'histedit', 'user': 'test'}
178 177 652413bf663ef2a641cab26574e46d5f5a64a55a cacdfd884a9321ec4e1de275ef3949fa953a1f83 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '4', 'operation': 'histedit', 'user': 'test'}
179 178 96e494a2d553dd05902ba1cee1d94d4cb7b8faed 0 {b346ab9a313db8537ecf96fca3ca3ca984ef3bd7} (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '0', 'operation': 'histedit', 'user': 'test'}
180 179 b558abc46d09c30f57ac31e85a8a3d64d2e906e4 0 {96e494a2d553dd05902ba1cee1d94d4cb7b8faed} (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '0', 'operation': 'histedit', 'user': 'test'}
181 180
182 181
183 182 Ensure hidden revision does not prevent histedit
184 183 -------------------------------------------------
185 184
186 185 create a hidden revision
187 186
188 187 $ hg histedit 6 --commands - << EOF
189 188 > pick b346ab9a313d 6 c
190 189 > drop 59d9f330561f 7 d
191 190 > pick cacdfd884a93 8 f
192 191 > EOF
193 192 $ hg log --graph
194 193 @ 11:c13eb81022ca f
195 194 |
196 195 o 6:b346ab9a313d c
197 196 |
198 197 o 0:cb9a9f314b8b a
199 198
200 199 check hidden revisions are ignored (6 has hidden children 7 and 8)
201 200
202 201 $ hg histedit 6 --commands - << EOF
203 202 > pick b346ab9a313d 6 c
204 203 > pick c13eb81022ca 8 f
205 204 > EOF
206 205
207 206
208 207
209 208 Test that rewriting that leaves instability behind is allowed
210 209 ---------------------------------------------------------------------
211 210
212 211 $ hg up '.^'
213 212 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
214 213 $ hg log -r 'children(.)'
215 214 11:c13eb81022ca f (no-eol)
216 215 $ hg histedit -r '.' --commands - <<EOF
217 216 > edit b346ab9a313d 6 c
218 217 > EOF
219 218 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
220 219 adding c
221 220 Editing (b346ab9a313d), you may commit or record as needed now.
222 221 (hg histedit --continue to resume)
223 222 [1]
224 223 $ echo c >> c
225 224 $ hg histedit --continue
226 225 1 new orphan changesets
227 226
228 227 $ hg log -r 'orphan()'
229 228 11:c13eb81022ca f (no-eol)
230 229
231 230 stabilise
232 231
233 232 $ hg rebase -r 'orphan()' -d .
234 233 rebasing 11:c13eb81022ca "f"
235 234 $ hg up tip -q
236 235
237 236 Test dropping of changeset on the top of the stack
238 237 -------------------------------------------------------
239 238
240 239 Nothing is rewritten below; the working directory parent must be changed for the
241 240 dropped changeset to be hidden.
242 241
243 242 $ cd ..
244 243 $ hg clone base droplast
245 244 updating to branch default
246 245 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
247 246 $ cd droplast
248 247 $ hg histedit -r '40db8afa467b' --commands - << EOF
249 248 > pick 40db8afa467b 10 c
250 249 > drop b449568bf7fc 11 f
251 250 > EOF
252 251 $ hg log -G
253 252 @ 12:40db8afa467b c
254 253 |
255 254 o 0:cb9a9f314b8b a
256 255
257 256
258 257 With rewritten ancestors
259 258
260 259 $ echo e > e
261 260 $ hg add e
262 261 $ hg commit -m g
263 262 $ echo f > f
264 263 $ hg add f
265 264 $ hg commit -m h
266 265 $ hg histedit -r '40db8afa467b' --commands - << EOF
267 266 > pick 47a8561c0449 12 g
268 267 > pick 40db8afa467b 10 c
269 268 > drop 1b3b05f35ff0 13 h
270 269 > EOF
271 270 $ hg log -G
272 271 @ 17:ee6544123ab8 c
273 272 |
274 273 o 16:269e713e9eae g
275 274 |
276 275 o 0:cb9a9f314b8b a
277 276
278 277 $ cd ../base
279 278
280 279
281 280
282 281 Test phases support
283 282 ===========================================
284 283
285 284 Check that histedit respects immutability
286 285 -------------------------------------------
287 286
288 287 $ cat >> $HGRCPATH << EOF
289 288 > [ui]
290 289 > logtemplate= {rev}:{node|short} ({phase}) {desc|firstline}\n
291 290 > EOF
292 291
293 292 $ hg ph -pv '.^'
294 293 phase changed for 2 changesets
295 294 $ hg log -G
296 295 @ 13:b449568bf7fc (draft) f
297 296 |
298 297 o 12:40db8afa467b (public) c
299 298 |
300 299 o 0:cb9a9f314b8b (public) a
301 300
302 301 $ hg histedit -r '.~2'
303 302 abort: cannot edit public changeset: cb9a9f314b8b
304 303 (see 'hg help phases' for details)
305 304 [255]
306 305
307 306
308 307 Prepare further testing
309 308 -------------------------------------------
310 309
311 310 $ for x in g h i j k ; do
312 311 > echo $x > $x
313 312 > hg add $x
314 313 > hg ci -m $x
315 314 > done
316 315 $ hg phase --force --secret .~2
317 316 $ hg log -G
318 317 @ 18:ee118ab9fa44 (secret) k
319 318 |
320 319 o 17:3a6c53ee7f3d (secret) j
321 320 |
322 321 o 16:b605fb7503f2 (secret) i
323 322 |
324 323 o 15:7395e1ff83bd (draft) h
325 324 |
326 325 o 14:6b70183d2492 (draft) g
327 326 |
328 327 o 13:b449568bf7fc (draft) f
329 328 |
330 329 o 12:40db8afa467b (public) c
331 330 |
332 331 o 0:cb9a9f314b8b (public) a
333 332
334 333 $ cd ..
335 334
336 335 simple phase conservation
337 336 -------------------------------------------
338 337
339 338 The resulting changeset should conserve the phase of the original one whatever the
340 339 phases.new-commit option is.
341 340
342 341 New-commit as draft (default)
343 342
344 343 $ cp -R base simple-draft
345 344 $ cd simple-draft
346 345 $ hg histedit -r 'b449568bf7fc' --commands - << EOF
347 346 > edit b449568bf7fc 11 f
348 347 > pick 6b70183d2492 12 g
349 348 > pick 7395e1ff83bd 13 h
350 349 > pick b605fb7503f2 14 i
351 350 > pick 3a6c53ee7f3d 15 j
352 351 > pick ee118ab9fa44 16 k
353 352 > EOF
354 353 0 files updated, 0 files merged, 6 files removed, 0 files unresolved
355 354 adding f
356 355 Editing (b449568bf7fc), you may commit or record as needed now.
357 356 (hg histedit --continue to resume)
358 357 [1]
359 358 $ echo f >> f
360 359 $ hg histedit --continue
361 360 $ hg log -G
362 361 @ 24:12e89af74238 (secret) k
363 362 |
364 363 o 23:636a8687b22e (secret) j
365 364 |
366 365 o 22:ccaf0a38653f (secret) i
367 366 |
368 367 o 21:11a89d1c2613 (draft) h
369 368 |
370 369 o 20:c1dec7ca82ea (draft) g
371 370 |
372 371 o 19:087281e68428 (draft) f
373 372 |
374 373 o 12:40db8afa467b (public) c
375 374 |
376 375 o 0:cb9a9f314b8b (public) a
377 376
378 377 $ cd ..
379 378
380 379
381 380 New-commit as secret (config)
382 381
383 382 $ cp -R base simple-secret
384 383 $ cd simple-secret
385 384 $ cat >> .hg/hgrc << EOF
386 385 > [phases]
387 386 > new-commit=secret
388 387 > EOF
389 388 $ hg histedit -r 'b449568bf7fc' --commands - << EOF
390 389 > edit b449568bf7fc 11 f
391 390 > pick 6b70183d2492 12 g
392 391 > pick 7395e1ff83bd 13 h
393 392 > pick b605fb7503f2 14 i
394 393 > pick 3a6c53ee7f3d 15 j
395 394 > pick ee118ab9fa44 16 k
396 395 > EOF
397 396 0 files updated, 0 files merged, 6 files removed, 0 files unresolved
398 397 adding f
399 398 Editing (b449568bf7fc), you may commit or record as needed now.
400 399 (hg histedit --continue to resume)
401 400 [1]
402 401 $ echo f >> f
403 402 $ hg histedit --continue
404 403 $ hg log -G
405 404 @ 24:12e89af74238 (secret) k
406 405 |
407 406 o 23:636a8687b22e (secret) j
408 407 |
409 408 o 22:ccaf0a38653f (secret) i
410 409 |
411 410 o 21:11a89d1c2613 (draft) h
412 411 |
413 412 o 20:c1dec7ca82ea (draft) g
414 413 |
415 414 o 19:087281e68428 (draft) f
416 415 |
417 416 o 12:40db8afa467b (public) c
418 417 |
419 418 o 0:cb9a9f314b8b (public) a
420 419
421 420 $ cd ..
422 421
423 422
424 423 Changeset reordering
425 424 -------------------------------------------
426 425
427 426 If a secret changeset is put before a draft one, all descendants should be secret.
428 427 It seems more important to preserve the secret phase.
429 428
430 429 $ cp -R base reorder
431 430 $ cd reorder
432 431 $ hg histedit -r 'b449568bf7fc' --commands - << EOF
433 432 > pick b449568bf7fc 11 f
434 433 > pick 3a6c53ee7f3d 15 j
435 434 > pick 6b70183d2492 12 g
436 435 > pick b605fb7503f2 14 i
437 436 > pick 7395e1ff83bd 13 h
438 437 > pick ee118ab9fa44 16 k
439 438 > EOF
440 439 $ hg log -G
441 440 @ 23:558246857888 (secret) k
442 441 |
443 442 o 22:28bd44768535 (secret) h
444 443 |
445 444 o 21:d5395202aeb9 (secret) i
446 445 |
447 446 o 20:21edda8e341b (secret) g
448 447 |
449 448 o 19:5ab64f3a4832 (secret) j
450 449 |
451 450 o 13:b449568bf7fc (draft) f
452 451 |
453 452 o 12:40db8afa467b (public) c
454 453 |
455 454 o 0:cb9a9f314b8b (public) a
456 455
457 456 $ cd ..
458 457
459 458 Changeset folding
460 459 -------------------------------------------
461 460
462 461 Folding a secret changeset with a draft one turns the result secret (again,
463 462 better safe than sorry). Folding between same-phase changesets still works.
464 463
465 464 Note that there are a few reorderings in this series for more extensive testing
466 465
467 466 $ cp -R base folding
468 467 $ cd folding
469 468 $ cat >> .hg/hgrc << EOF
470 469 > [phases]
471 470 > new-commit=secret
472 471 > EOF
473 472 $ hg histedit -r 'b449568bf7fc' --commands - << EOF
474 473 > pick 7395e1ff83bd 13 h
475 474 > fold b449568bf7fc 11 f
476 475 > pick 6b70183d2492 12 g
477 476 > fold 3a6c53ee7f3d 15 j
478 477 > pick b605fb7503f2 14 i
479 478 > fold ee118ab9fa44 16 k
480 479 > EOF
481 480 $ hg log -G
482 481 @ 27:f9daec13fb98 (secret) i
483 482 |
484 483 o 24:49807617f46a (secret) g
485 484 |
486 485 o 21:050280826e04 (draft) h
487 486 |
488 487 o 12:40db8afa467b (public) c
489 488 |
490 489 o 0:cb9a9f314b8b (public) a
491 490
492 491 $ hg co 49807617f46a
493 492 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
494 493 $ echo wat >> wat
495 494 $ hg add wat
496 495 $ hg ci -m 'add wat'
497 496 created new head
498 497 $ hg merge f9daec13fb98
499 498 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
500 499 (branch merge, don't forget to commit)
501 500 $ hg ci -m 'merge'
502 501 $ echo not wat > wat
503 502 $ hg ci -m 'modify wat'
504 503 $ hg histedit 050280826e04
505 504 abort: cannot edit history that contains merges
506 505 [255]
507 506 $ cd ..
508 507
509 508 Check abort behavior
510 509 -------------------------------------------
511 510
512 511 We check that abort properly cleans the repository so the same histedit can be
513 512 attempted later.
514 513
515 514 $ cp -R base abort
516 515 $ cd abort
517 516 $ hg histedit -r 'b449568bf7fc' --commands - << EOF
518 517 > pick b449568bf7fc 13 f
519 518 > pick 7395e1ff83bd 15 h
520 519 > pick 6b70183d2492 14 g
521 520 > pick b605fb7503f2 16 i
522 521 > roll 3a6c53ee7f3d 17 j
523 522 > edit ee118ab9fa44 18 k
524 523 > EOF
525 524 Editing (ee118ab9fa44), you may commit or record as needed now.
526 525 (hg histedit --continue to resume)
527 526 [1]
528 527
529 528 $ hg histedit --abort
530 529 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
531 530 saved backup bundle to $TESTTMP/abort/.hg/strip-backup/4dc06258baa6-dff4ef05-backup.hg
532 531
533 532 $ hg log -G
534 533 @ 18:ee118ab9fa44 (secret) k
535 534 |
536 535 o 17:3a6c53ee7f3d (secret) j
537 536 |
538 537 o 16:b605fb7503f2 (secret) i
539 538 |
540 539 o 15:7395e1ff83bd (draft) h
541 540 |
542 541 o 14:6b70183d2492 (draft) g
543 542 |
544 543 o 13:b449568bf7fc (draft) f
545 544 |
546 545 o 12:40db8afa467b (public) c
547 546 |
548 547 o 0:cb9a9f314b8b (public) a
549 548
550 549 $ hg histedit -r 'b449568bf7fc' --commands - << EOF --config experimental.evolution.track-operation=1
551 550 > pick b449568bf7fc 13 f
552 551 > pick 7395e1ff83bd 15 h
553 552 > pick 6b70183d2492 14 g
554 553 > pick b605fb7503f2 16 i
555 554 > pick 3a6c53ee7f3d 17 j
556 555 > edit ee118ab9fa44 18 k
557 556 > EOF
558 557 Editing (ee118ab9fa44), you may commit or record as needed now.
559 558 (hg histedit --continue to resume)
560 559 [1]
561 560 $ hg histedit --continue --config experimental.evolution.track-operation=1
562 561 $ hg log -G
563 562 @ 23:175d6b286a22 (secret) k
564 563 |
565 564 o 22:44ca09d59ae4 (secret) j
566 565 |
567 566 o 21:31747692a644 (secret) i
568 567 |
569 568 o 20:9985cd4f21fa (draft) g
570 569 |
571 570 o 19:4dc06258baa6 (draft) h
572 571 |
573 572 o 13:b449568bf7fc (draft) f
574 573 |
575 574 o 12:40db8afa467b (public) c
576 575 |
577 576 o 0:cb9a9f314b8b (public) a
578 577
579 578 $ hg debugobsolete --rev .
580 579 ee118ab9fa44ebb86be85996548b5517a39e5093 175d6b286a224c23f192e79a581ce83131a53fa2 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '4', 'operation': 'histedit', 'user': 'test'}
@@ -1,204 +1,203 b''
1 1 =====================
2 2 Test workflow options
3 3 =====================
4 4
5 5 $ . "$TESTDIR/testlib/obsmarker-common.sh"
6 6
7 7 Test single head enforcing - Setup
8 8 =============================================
9 9
10 10 $ cat << EOF >> $HGRCPATH
11 11 > [experimental]
12 12 > evolution = all
13 13 > EOF
14 14 $ hg init single-head-server
15 15 $ cd single-head-server
16 16 $ cat <<EOF >> .hg/hgrc
17 17 > [phases]
18 18 > publish = no
19 19 > [experimental]
20 20 > single-head-per-branch = yes
21 21 > EOF
22 22 $ mkcommit ROOT
23 23 $ mkcommit c_dA0
24 24 $ cd ..
25 25
26 26 $ hg clone single-head-server client
27 27 updating to branch default
28 28 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 29
30 30 Test single head enforcing - with branch only
31 31 ---------------------------------------------
32 32
33 33 $ cd client
34 34
35 35 continuing the current default branch
36 36
37 37 $ mkcommit c_dB0
38 38 $ hg push
39 39 pushing to $TESTTMP/single-head-server
40 40 searching for changes
41 41 adding changesets
42 42 adding manifests
43 43 adding file changes
44 44 added 1 changesets with 1 changes to 1 files
45 45
46 46 creating a new branch
47 47
48 48 $ hg up 'desc("ROOT")'
49 49 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
50 50 $ hg branch branch_A
51 51 marked working directory as branch branch_A
52 52 (branches are permanent and global, did you want a bookmark?)
53 53 $ mkcommit c_aC0
54 54 $ hg push --new-branch
55 55 pushing to $TESTTMP/single-head-server
56 56 searching for changes
57 57 adding changesets
58 58 adding manifests
59 59 adding file changes
60 60 added 1 changesets with 1 changes to 1 files (+1 heads)
61 61
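[Editorial note, not part of the original test: this push is accepted even
though it adds a head, because single-head enforcement is per named branch and
the new head sits on branch_A rather than default. A sketch of one way to
confirm that each branch still has exactly one head; the template string is
illustrative:]

  $ hg heads -T '{branch}: {rev}:{node|short}\n'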
62 62 Create a new head on the default branch
63 63
64 64 $ hg up 'desc("c_dA0")'
65 65 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
66 66 $ mkcommit c_dD0
67 67 created new head
68 68 $ hg push -f
69 69 pushing to $TESTTMP/single-head-server
70 70 searching for changes
71 71 adding changesets
72 72 adding manifests
73 73 adding file changes
74 74 added 1 changesets with 1 changes to 1 files (+1 heads)
75 75 transaction abort!
76 76 rollback completed
77 77 abort: rejecting multiple heads on branch "default"
78 78 (2 heads: 286d02a6e2a2 9bf953aa81f6)
79 79 [255]
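[Editorial note, not part of the original test: the "transaction abort! /
rollback completed" lines mean the rejection fires inside the push transaction,
so the server is left exactly as it was before the push. A sketch of how one
might verify that from the client; the repository path matches the test layout:]

  $ hg -R $TESTTMP/single-head-server log -r 'head() and branch(default)' -T '{rev}:{node|short}\n'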
80 80
81 81 remerge them
82 82
83 83 $ hg merge
84 84 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
85 85 (branch merge, don't forget to commit)
86 86 $ mkcommit c_dE0
87 87 $ hg push
88 88 pushing to $TESTTMP/single-head-server
89 89 searching for changes
90 90 adding changesets
91 91 adding manifests
92 92 adding file changes
93 93 added 2 changesets with 2 changes to 2 files
94 94
95 95 Test single head enforcing - after rewrite
96 96 ------------------------------------------
97 97
98 98 $ mkcommit c_dF0
99 99 $ hg push
100 100 pushing to $TESTTMP/single-head-server
101 101 searching for changes
102 102 adding changesets
103 103 adding manifests
104 104 adding file changes
105 105 added 1 changesets with 1 changes to 1 files
106 106 $ hg commit --amend -m c_dF1
107 107 $ hg push
108 108 pushing to $TESTTMP/single-head-server
109 109 searching for changes
110 110 adding changesets
111 111 adding manifests
112 112 adding file changes
113 113 added 1 changesets with 0 changes to 1 files (+1 heads)
114 114 1 new obsolescence markers
115 115 obsoleted 1 changesets
116 116
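[Editorial note, not part of the original test: the amended changeset travels in
the same transaction as its obsolescence marker, so although the server reports
"(+1 heads)", the superseded head is obsoleted before the transaction closes and
only one visible head remains; that is why the single-head check does not fire
here. A sketch to inspect the surviving head, not in the original test:]

  $ hg heads default -T '{rev}:{node|short} {desc}\n'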
117 117 Check it does not interfere with strip
118 118 ------------------------------------
119 119
120 120 setup
121 121
122 122 $ hg branch branch_A --force
123 123 marked working directory as branch branch_A
124 124 $ mkcommit c_aG0
125 125 created new head
126 126 $ hg update 'desc("c_dF1")'
127 127 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
128 128 $ mkcommit c_dH0
129 129 $ hg update 'desc("c_aG0")'
130 130 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
131 131 $ hg merge
132 132 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
133 133 (branch merge, don't forget to commit)
134 134 $ mkcommit c_aI0
135 135 $ hg log -G
136 136 @ changeset: 10:49003e504178
137 137 |\ branch: branch_A
138 138 | | tag: tip
139 139 | | parent: 8:a33fb808fb4b
140 140 | | parent: 3:840af1c6bc88
141 141 | | user: test
142 142 | | date: Thu Jan 01 00:00:00 1970 +0000
143 143 | | summary: c_aI0
144 144 | |
145 145 | | o changeset: 9:fe47ea669cea
146 146 | | | parent: 7:99a2dc242c5d
147 147 | | | user: test
148 148 | | | date: Thu Jan 01 00:00:00 1970 +0000
149 149 | | | summary: c_dH0
150 150 | | |
151 151 | o | changeset: 8:a33fb808fb4b
152 152 | |/ branch: branch_A
153 153 | | user: test
154 154 | | date: Thu Jan 01 00:00:00 1970 +0000
155 155 | | summary: c_aG0
156 156 | |
157 157 | o changeset: 7:99a2dc242c5d
158 158 | | parent: 5:6ed1df20edb1
159 159 | | user: test
160 160 | | date: Thu Jan 01 00:00:00 1970 +0000
161 161 | | summary: c_dF1
162 162 | |
163 163 | o changeset: 5:6ed1df20edb1
164 164 | |\ parent: 4:9bf953aa81f6
165 165 | | | parent: 2:286d02a6e2a2
166 166 | | | user: test
167 167 | | | date: Thu Jan 01 00:00:00 1970 +0000
168 168 | | | summary: c_dE0
169 169 | | |
170 170 | | o changeset: 4:9bf953aa81f6
171 171 | | | parent: 1:134bc3852ad2
172 172 | | | user: test
173 173 | | | date: Thu Jan 01 00:00:00 1970 +0000
174 174 | | | summary: c_dD0
175 175 | | |
176 176 o | | changeset: 3:840af1c6bc88
177 177 | | | branch: branch_A
178 178 | | | parent: 0:ea207398892e
179 179 | | | user: test
180 180 | | | date: Thu Jan 01 00:00:00 1970 +0000
181 181 | | | summary: c_aC0
182 182 | | |
183 183 | o | changeset: 2:286d02a6e2a2
184 184 | |/ user: test
185 185 | | date: Thu Jan 01 00:00:00 1970 +0000
186 186 | | summary: c_dB0
187 187 | |
188 188 | o changeset: 1:134bc3852ad2
189 189 |/ user: test
190 190 | date: Thu Jan 01 00:00:00 1970 +0000
191 191 | summary: c_dA0
192 192 |
193 193 o changeset: 0:ea207398892e
194 194 user: test
195 195 date: Thu Jan 01 00:00:00 1970 +0000
196 196 summary: ROOT
197 197
198 198
199 199 actual stripping
200 200
201 201 $ hg strip --config extensions.strip= --rev 'desc("c_dH0")'
202 202 saved backup bundle to $TESTTMP/client/.hg/strip-backup/fe47ea669cea-a41bf5a9-backup.hg
203 warning: ignoring unknown working parent 49003e504178!
204 203