context: move logic from changectx.__init__ to localrepo.__getitem__ (API)...
Martin von Zweigbergk
r39994:3d35304b default
@@ -1,2497 +1,2437 @@
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 bin,
19 18 hex,
20 19 modifiednodeid,
21 20 nullid,
22 21 nullrev,
23 22 short,
24 23 wdirfilenodeids,
25 24 wdirid,
26 25 )
27 26 from . import (
28 27 dagop,
29 28 encoding,
30 29 error,
31 30 fileset,
32 31 match as matchmod,
33 32 obsolete as obsmod,
34 33 patch,
35 34 pathutil,
36 35 phases,
37 36 pycompat,
38 37 repoview,
39 38 scmutil,
40 39 sparse,
41 40 subrepo,
42 41 subrepoutil,
43 42 util,
44 43 )
45 44 from .utils import (
46 45 dateutil,
47 46 stringutil,
48 47 )
49 48
50 49 propertycache = util.propertycache
51 50
52 51 class basectx(object):
53 52 """A basectx object represents the common logic for its children:
54 53 changectx: read-only context that is already present in the repo,
55 54 workingctx: a context that represents the working directory and can
56 55 be committed,
57 56 memctx: a context that represents changes in-memory and can also
58 57 be committed."""
59 58
60 59 def __init__(self, repo):
61 60 self._repo = repo
62 61
63 62 def __bytes__(self):
64 63 return short(self.node())
65 64
66 65 __str__ = encoding.strmethod(__bytes__)
67 66
68 67 def __repr__(self):
69 68 return r"<%s %s>" % (type(self).__name__, str(self))
70 69
71 70 def __eq__(self, other):
72 71 try:
73 72 return type(self) == type(other) and self._rev == other._rev
74 73 except AttributeError:
75 74 return False
76 75
77 76 def __ne__(self, other):
78 77 return not (self == other)
79 78
80 79 def __contains__(self, key):
81 80 return key in self._manifest
82 81
83 82 def __getitem__(self, key):
84 83 return self.filectx(key)
85 84
86 85 def __iter__(self):
87 86 return iter(self._manifest)
88 87
89 88 def _buildstatusmanifest(self, status):
90 89 """Builds a manifest that includes the given status results, if this is
91 90 a working copy context. For non-working copy contexts, it just returns
92 91 the normal manifest."""
93 92 return self.manifest()
94 93
95 94 def _matchstatus(self, other, match):
96 95 """This internal method provides a way for child objects to override the
97 96 match operator.
98 97 """
99 98 return match
100 99
101 100 def _buildstatus(self, other, s, match, listignored, listclean,
102 101 listunknown):
103 102 """build a status with respect to another context"""
104 103 # Load earliest manifest first for caching reasons. More specifically,
105 104 # if you have revisions 1000 and 1001, 1001 is probably stored as a
106 105 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
107 106 # 1000 and cache it so that when you read 1001, we just need to apply a
108 107 # delta to what's in the cache. So that's one full reconstruction + one
109 108 # delta application.
110 109 mf2 = None
111 110 if self.rev() is not None and self.rev() < other.rev():
112 111 mf2 = self._buildstatusmanifest(s)
113 112 mf1 = other._buildstatusmanifest(s)
114 113 if mf2 is None:
115 114 mf2 = self._buildstatusmanifest(s)
116 115
117 116 modified, added = [], []
118 117 removed = []
119 118 clean = []
120 119 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
121 120 deletedset = set(deleted)
122 121 d = mf1.diff(mf2, match=match, clean=listclean)
123 122 for fn, value in d.iteritems():
124 123 if fn in deletedset:
125 124 continue
126 125 if value is None:
127 126 clean.append(fn)
128 127 continue
129 128 (node1, flag1), (node2, flag2) = value
130 129 if node1 is None:
131 130 added.append(fn)
132 131 elif node2 is None:
133 132 removed.append(fn)
134 133 elif flag1 != flag2:
135 134 modified.append(fn)
136 135 elif node2 not in wdirfilenodeids:
137 136 # When comparing files between two commits, we save time by
138 137 # not comparing the file contents when the nodeids differ.
139 138 # Note that this means we incorrectly report a reverted change
140 139 # to a file as a modification.
141 140 modified.append(fn)
142 141 elif self[fn].cmp(other[fn]):
143 142 modified.append(fn)
144 143 else:
145 144 clean.append(fn)
146 145
147 146 if removed:
148 147 # need to filter files if they are already reported as removed
149 148 unknown = [fn for fn in unknown if fn not in mf1 and
150 149 (not match or match(fn))]
151 150 ignored = [fn for fn in ignored if fn not in mf1 and
152 151 (not match or match(fn))]
153 152 # if they're deleted, don't report them as removed
154 153 removed = [fn for fn in removed if fn not in deletedset]
155 154
156 155 return scmutil.status(modified, added, removed, deleted, unknown,
157 156 ignored, clean)
158 157
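# Illustrative sketch (not part of this commit): the loop above classifies
# each entry of mf1.diff(mf2) by which side carries a node. A toy version
# over plain dicts mapping filename -> nodeid (all names hypothetical),
# mirroring the added/removed/modified branches:

def _classify_sketch(mf1, mf2):
    added, removed, modified = [], [], []
    for fn in sorted(set(mf1) | set(mf2)):
        n1, n2 = mf1.get(fn), mf2.get(fn)
        if n1 == n2:
            continue                  # unchanged entries are "clean"
        if n1 is None:
            added.append(fn)          # only in the newer manifest
        elif n2 is None:
            removed.append(fn)        # only in the older manifest
        else:
            modified.append(fn)       # on both sides, nodeids differ
    return added, removed, modified

# usage: _classify_sketch({'a': 1, 'b': 2}, {'b': 3, 'c': 4})
#        -> (['c'], ['a'], ['b'])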
159 158 @propertycache
160 159 def substate(self):
161 160 return subrepoutil.state(self, self._repo.ui)
162 161
163 162 def subrev(self, subpath):
164 163 return self.substate[subpath][1]
165 164
166 165 def rev(self):
167 166 return self._rev
168 167 def node(self):
169 168 return self._node
170 169 def hex(self):
171 170 return hex(self.node())
172 171 def manifest(self):
173 172 return self._manifest
174 173 def manifestctx(self):
175 174 return self._manifestctx
176 175 def repo(self):
177 176 return self._repo
178 177 def phasestr(self):
179 178 return phases.phasenames[self.phase()]
180 179 def mutable(self):
181 180 return self.phase() > phases.public
182 181
183 182 def matchfileset(self, expr, badfn=None):
184 183 return fileset.match(self, expr, badfn=badfn)
185 184
186 185 def obsolete(self):
187 186 """True if the changeset is obsolete"""
188 187 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
189 188
190 189 def extinct(self):
191 190 """True if the changeset is extinct"""
192 191 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
193 192
194 193 def orphan(self):
195 194 """True if the changeset is not obsolete, but its ancestor is"""
196 195 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
197 196
198 197 def phasedivergent(self):
199 198 """True if the changeset tries to be a successor of a public changeset
200 199
201 200 Only non-public and non-obsolete changesets may be phase-divergent.
202 201 """
203 202 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
204 203
205 204 def contentdivergent(self):
206 205 """Is a successor of a changeset with multiple possible successor sets
207 206
208 207 Only non-public and non-obsolete changesets may be content-divergent.
209 208 """
210 209 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
211 210
212 211 def isunstable(self):
213 212 """True if the changeset is either orphan, phase-divergent or
214 213 content-divergent"""
215 214 return self.orphan() or self.phasedivergent() or self.contentdivergent()
216 215
217 216 def instabilities(self):
218 217 """return the list of instabilities affecting this changeset.
219 218
220 219 Instabilities are returned as strings. possible values are:
221 220 - orphan,
222 221 - phase-divergent,
223 222 - content-divergent.
224 223 """
225 224 instabilities = []
226 225 if self.orphan():
227 226 instabilities.append('orphan')
228 227 if self.phasedivergent():
229 228 instabilities.append('phase-divergent')
230 229 if self.contentdivergent():
231 230 instabilities.append('content-divergent')
232 231 return instabilities
233 232
234 233 def parents(self):
235 234 """return contexts for each parent changeset"""
236 235 return self._parents
237 236
238 237 def p1(self):
239 238 return self._parents[0]
240 239
241 240 def p2(self):
242 241 parents = self._parents
243 242 if len(parents) == 2:
244 243 return parents[1]
245 244 return self._repo[nullrev]
246 245
247 246 def _fileinfo(self, path):
248 247 if r'_manifest' in self.__dict__:
249 248 try:
250 249 return self._manifest[path], self._manifest.flags(path)
251 250 except KeyError:
252 251 raise error.ManifestLookupError(self._node, path,
253 252 _('not found in manifest'))
254 253 if r'_manifestdelta' in self.__dict__ or path in self.files():
255 254 if path in self._manifestdelta:
256 255 return (self._manifestdelta[path],
257 256 self._manifestdelta.flags(path))
258 257 mfl = self._repo.manifestlog
259 258 try:
260 259 node, flag = mfl[self._changeset.manifest].find(path)
261 260 except KeyError:
262 261 raise error.ManifestLookupError(self._node, path,
263 262 _('not found in manifest'))
264 263
265 264 return node, flag
266 265
267 266 def filenode(self, path):
268 267 return self._fileinfo(path)[0]
269 268
270 269 def flags(self, path):
271 270 try:
272 271 return self._fileinfo(path)[1]
273 272 except error.LookupError:
274 273 return ''
275 274
276 275 def sub(self, path, allowcreate=True):
277 276 '''return a subrepo for the stored revision of path, never wdir()'''
278 277 return subrepo.subrepo(self, path, allowcreate=allowcreate)
279 278
280 279 def nullsub(self, path, pctx):
281 280 return subrepo.nullsubrepo(self, path, pctx)
282 281
283 282 def workingsub(self, path):
284 283 '''return a subrepo for the stored revision, or wdir if this is a wdir
285 284 context.
286 285 '''
287 286 return subrepo.subrepo(self, path, allowwdir=True)
288 287
289 288 def match(self, pats=None, include=None, exclude=None, default='glob',
290 289 listsubrepos=False, badfn=None):
291 290 r = self._repo
292 291 return matchmod.match(r.root, r.getcwd(), pats,
293 292 include, exclude, default,
294 293 auditor=r.nofsauditor, ctx=self,
295 294 listsubrepos=listsubrepos, badfn=badfn)
296 295
297 296 def diff(self, ctx2=None, match=None, changes=None, opts=None,
298 297 losedatafn=None, prefix='', relroot='', copy=None,
299 298 hunksfilterfn=None):
300 299 """Returns a diff generator for the given contexts and matcher"""
301 300 if ctx2 is None:
302 301 ctx2 = self.p1()
303 302 if ctx2 is not None:
304 303 ctx2 = self._repo[ctx2]
305 304 return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
306 305 opts=opts, losedatafn=losedatafn, prefix=prefix,
307 306 relroot=relroot, copy=copy,
308 307 hunksfilterfn=hunksfilterfn)
309 308
310 309 def dirs(self):
311 310 return self._manifest.dirs()
312 311
313 312 def hasdir(self, dir):
314 313 return self._manifest.hasdir(dir)
315 314
316 315 def status(self, other=None, match=None, listignored=False,
317 316 listclean=False, listunknown=False, listsubrepos=False):
318 317 """return status of files between two nodes or node and working
319 318 directory.
320 319
321 320 If other is None, compare this node with working directory.
322 321
323 322 returns (modified, added, removed, deleted, unknown, ignored, clean)
324 323 """
325 324
326 325 ctx1 = self
327 326 ctx2 = self._repo[other]
328 327
329 328 # This next code block is, admittedly, fragile logic that tests for
330 329 # reversing the contexts and wouldn't need to exist if it weren't for
331 330 # the fast (and common) code path of comparing the working directory
332 331 # with its first parent.
333 332 #
334 333 # What we're aiming for here is the ability to call:
335 334 #
336 335 # workingctx.status(parentctx)
337 336 #
338 337 # If we always built the manifest for each context and compared those,
339 338 # then we'd be done. But the special case of the above call means we
340 339 # just copy the manifest of the parent.
341 340 reversed = False
342 341 if (not isinstance(ctx1, changectx)
343 342 and isinstance(ctx2, changectx)):
344 343 reversed = True
345 344 ctx1, ctx2 = ctx2, ctx1
346 345
347 346 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
348 347 match = ctx2._matchstatus(ctx1, match)
349 348 r = scmutil.status([], [], [], [], [], [], [])
350 349 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
351 350 listunknown)
352 351
353 352 if reversed:
354 353 # Reverse added and removed. Clear deleted, unknown and ignored as
355 354 # these make no sense to reverse.
356 355 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
357 356 r.clean)
358 357
359 358 if listsubrepos:
360 359 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
361 360 try:
362 361 rev2 = ctx2.subrev(subpath)
363 362 except KeyError:
364 363 # A subrepo that existed in node1 was deleted between
365 364 # node1 and node2 (inclusive). Thus, ctx2's substate
366 365 # won't contain that subpath. The best we can do is ignore it.
367 366 rev2 = None
368 367 submatch = matchmod.subdirmatcher(subpath, match)
369 368 s = sub.status(rev2, match=submatch, ignored=listignored,
370 369 clean=listclean, unknown=listunknown,
371 370 listsubrepos=True)
372 371 for rfiles, sfiles in zip(r, s):
373 372 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
374 373
375 374 narrowmatch = self._repo.narrowmatch()
376 375 if not narrowmatch.always():
377 376 for l in r:
378 377 l[:] = list(filter(narrowmatch, l))
379 378 for l in r:
380 379 l.sort()
381 380
382 381 return r
383 382
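# Illustrative sketch (not part of this commit): hypothetical uses of the
# status() method above, assuming an already-open `repo` (for example via
# mercurial.hg.repository(ui, path)); `st` is a scmutil.status tuple of
# (modified, added, removed, deleted, unknown, ignored, clean):
#
#     st = repo[None].status()              # working directory vs. parent
#     st = repo['tip'].status(repo['.'])    # between two changesets
#     st = repo[None].status(listignored=True, listclean=True)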
384 383 class changectx(basectx):
385 384 """A changecontext object makes access to data related to a particular
386 385 changeset convenient. It represents a read-only context already present in
387 386 the repo."""
388 def __init__(self, repo, changeid='.'):
387 def __init__(self, repo, rev, node):
389 388 """changeid is a revision number, node, or tag"""
390 389 super(changectx, self).__init__(repo)
391
392 try:
393 if isinstance(changeid, int):
394 self._node = repo.changelog.node(changeid)
395 self._rev = changeid
396 return
397 elif changeid == 'null':
398 self._node = nullid
399 self._rev = nullrev
400 return
401 elif changeid == 'tip':
402 self._node = repo.changelog.tip()
403 self._rev = repo.changelog.rev(self._node)
404 return
405 elif (changeid == '.'
406 or repo.local() and changeid == repo.dirstate.p1()):
407 # this is a hack to delay/avoid loading obsmarkers
408 # when we know that '.' won't be hidden
409 self._node = repo.dirstate.p1()
410 self._rev = repo.unfiltered().changelog.rev(self._node)
411 return
412 elif len(changeid) == 20:
413 try:
414 self._node = changeid
415 self._rev = repo.changelog.rev(changeid)
416 return
417 except error.FilteredLookupError:
418 changeid = hex(changeid) # for the error message
419 raise
420 except LookupError:
421 # check if it might have come from damaged dirstate
422 #
423 # XXX we could avoid the unfiltered if we had a recognizable
424 # exception for filtered changeset access
425 if (repo.local()
426 and changeid in repo.unfiltered().dirstate.parents()):
427 msg = _("working directory has unknown parent '%s'!")
428 raise error.Abort(msg % short(changeid))
429 changeid = hex(changeid) # for the error message
430
431 elif len(changeid) == 40:
432 try:
433 self._node = bin(changeid)
434 self._rev = repo.changelog.rev(self._node)
435 return
436 except error.FilteredLookupError:
437 raise
438 except LookupError:
439 pass
440 else:
441 raise error.ProgrammingError(
442 "unsupported changeid '%s' of type %s" %
443 (changeid, type(changeid)))
444
445 except (error.FilteredIndexError, error.FilteredLookupError):
446 raise error.FilteredRepoLookupError(_("filtered revision '%s'")
447 % pycompat.bytestr(changeid))
448 except IndexError:
449 pass
450 raise error.RepoLookupError(
451 _("unknown revision '%s'") % changeid)
390 self._rev = rev
391 self._node = node
452 392
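# Illustrative sketch (not part of this commit): with this change, changeid
# resolution (int, 'null', 'tip', '.', binary or hex nodes) is expected to
# move into localrepo.__getitem__, which hands changectx an already-resolved
# (rev, node) pair. A minimal, self-contained illustration of that dispatch
# over a toy changelog (a list of binary nodes); all names here are
# hypothetical and the real method covers many more cases:

def _resolve_changeid_sketch(nodes, changeid):
    """Return (rev, node) for a changeid over a toy changelog."""
    nullid_ = b'\0' * 20
    if isinstance(changeid, int):            # plain revision number
        return changeid, nodes[changeid]
    if changeid == 'null':                   # the null revision
        return -1, nullid_
    if changeid == 'tip':                    # last revision in the changelog
        return len(nodes) - 1, nodes[-1]
    if len(changeid) == 20:                  # a binary node
        return nodes.index(changeid), changeid
    raise LookupError("unknown revision %r" % (changeid,))

# usage: _resolve_changeid_sketch([b'a' * 20, b'b' * 20], 'tip')
#        -> (1, b'bbbbbbbbbbbbbbbbbbbb')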
453 393 def __hash__(self):
454 394 try:
455 395 return hash(self._rev)
456 396 except AttributeError:
457 397 return id(self)
458 398
459 399 def __nonzero__(self):
460 400 return self._rev != nullrev
461 401
462 402 __bool__ = __nonzero__
463 403
464 404 @propertycache
465 405 def _changeset(self):
466 406 return self._repo.changelog.changelogrevision(self.rev())
467 407
468 408 @propertycache
469 409 def _manifest(self):
470 410 return self._manifestctx.read()
471 411
472 412 @property
473 413 def _manifestctx(self):
474 414 return self._repo.manifestlog[self._changeset.manifest]
475 415
476 416 @propertycache
477 417 def _manifestdelta(self):
478 418 return self._manifestctx.readdelta()
479 419
480 420 @propertycache
481 421 def _parents(self):
482 422 repo = self._repo
483 423 p1, p2 = repo.changelog.parentrevs(self._rev)
484 424 if p2 == nullrev:
485 425 return [repo[p1]]
486 426 return [repo[p1], repo[p2]]
487 427
488 428 def changeset(self):
489 429 c = self._changeset
490 430 return (
491 431 c.manifest,
492 432 c.user,
493 433 c.date,
494 434 c.files,
495 435 c.description,
496 436 c.extra,
497 437 )
498 438 def manifestnode(self):
499 439 return self._changeset.manifest
500 440
501 441 def user(self):
502 442 return self._changeset.user
503 443 def date(self):
504 444 return self._changeset.date
505 445 def files(self):
506 446 return self._changeset.files
507 447 def description(self):
508 448 return self._changeset.description
509 449 def branch(self):
510 450 return encoding.tolocal(self._changeset.extra.get("branch"))
511 451 def closesbranch(self):
512 452 return 'close' in self._changeset.extra
513 453 def extra(self):
514 454 """Return a dict of extra information."""
515 455 return self._changeset.extra
516 456 def tags(self):
517 457 """Return a list of byte tag names"""
518 458 return self._repo.nodetags(self._node)
519 459 def bookmarks(self):
520 460 """Return a list of byte bookmark names."""
521 461 return self._repo.nodebookmarks(self._node)
522 462 def phase(self):
523 463 return self._repo._phasecache.phase(self._repo, self._rev)
524 464 def hidden(self):
525 465 return self._rev in repoview.filterrevs(self._repo, 'visible')
526 466
527 467 def isinmemory(self):
528 468 return False
529 469
530 470 def children(self):
531 471 """return list of changectx contexts for each child changeset.
532 472
533 473 This returns only the immediate child changesets. Use descendants() to
534 474 recursively walk children.
535 475 """
536 476 c = self._repo.changelog.children(self._node)
537 477 return [self._repo[x] for x in c]
538 478
539 479 def ancestors(self):
540 480 for a in self._repo.changelog.ancestors([self._rev]):
541 481 yield self._repo[a]
542 482
543 483 def descendants(self):
544 484 """Recursively yield all children of the changeset.
545 485
546 486 For just the immediate children, use children()
547 487 """
548 488 for d in self._repo.changelog.descendants([self._rev]):
549 489 yield self._repo[d]
550 490
551 491 def filectx(self, path, fileid=None, filelog=None):
552 492 """get a file context from this changeset"""
553 493 if fileid is None:
554 494 fileid = self.filenode(path)
555 495 return filectx(self._repo, path, fileid=fileid,
556 496 changectx=self, filelog=filelog)
557 497
558 498 def ancestor(self, c2, warn=False):
559 499 """return the "best" ancestor context of self and c2
560 500
561 501 If there are multiple candidates, it will show a message and check
562 502 merge.preferancestor configuration before falling back to the
563 503 revlog ancestor."""
564 504 # deal with workingctxs
565 505 n2 = c2._node
566 506 if n2 is None:
567 507 n2 = c2._parents[0]._node
568 508 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
569 509 if not cahs:
570 510 anc = nullid
571 511 elif len(cahs) == 1:
572 512 anc = cahs[0]
573 513 else:
574 514 # experimental config: merge.preferancestor
575 515 for r in self._repo.ui.configlist('merge', 'preferancestor'):
576 516 try:
577 517 ctx = scmutil.revsymbol(self._repo, r)
578 518 except error.RepoLookupError:
579 519 continue
580 520 anc = ctx.node()
581 521 if anc in cahs:
582 522 break
583 523 else:
584 524 anc = self._repo.changelog.ancestor(self._node, n2)
585 525 if warn:
586 526 self._repo.ui.status(
587 527 (_("note: using %s as ancestor of %s and %s\n") %
588 528 (short(anc), short(self._node), short(n2))) +
589 529 ''.join(_(" alternatively, use --config "
590 530 "merge.preferancestor=%s\n") %
591 531 short(n) for n in sorted(cahs) if n != anc))
592 532 return self._repo[anc]
593 533
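# Illustrative sketch (not part of this commit): when commonancestorsheads()
# returns several candidates, the loop above picks the first
# merge.preferancestor entry that is among them, else falls back to the
# revlog ancestor. The selection logic in isolation (names hypothetical):

def _pick_ancestor_sketch(candidates, preferred, fallback):
    for choice in preferred:          # user-configured order wins
        if choice in candidates:
            return choice
    return fallback                   # stand-in for changelog.ancestor(...)

# usage: _pick_ancestor_sketch({3, 7}, [7, 5], fallback=3) -> 7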
594 534 def isancestorof(self, other):
595 535 """True if this changeset is an ancestor of other"""
596 536 return self._repo.changelog.isancestorrev(self._rev, other._rev)
597 537
598 538 def walk(self, match):
599 539 '''Generates matching file names.'''
600 540
601 541 # Wrap match.bad method to have message with nodeid
602 542 def bad(fn, msg):
603 543 # The manifest doesn't know about subrepos, so don't complain about
604 544 # paths into valid subrepos.
605 545 if any(fn == s or fn.startswith(s + '/')
606 546 for s in self.substate):
607 547 return
608 548 match.bad(fn, _('no such file in rev %s') % self)
609 549
610 550 m = matchmod.badmatch(match, bad)
611 551 return self._manifest.walk(m)
612 552
613 553 def matches(self, match):
614 554 return self.walk(match)
615 555
616 556 class basefilectx(object):
617 557 """A filecontext object represents the common logic for its children:
618 558 filectx: read-only access to a filerevision that is already present
619 559 in the repo,
620 560 workingfilectx: a filecontext that represents files from the working
621 561 directory,
622 562 memfilectx: a filecontext that represents files in-memory,
623 563 """
624 564 @propertycache
625 565 def _filelog(self):
626 566 return self._repo.file(self._path)
627 567
628 568 @propertycache
629 569 def _changeid(self):
630 570 if r'_changeid' in self.__dict__:
631 571 return self._changeid
632 572 elif r'_changectx' in self.__dict__:
633 573 return self._changectx.rev()
634 574 elif r'_descendantrev' in self.__dict__:
635 575 # this file context was created from a revision with a known
636 576 # descendant, we can (lazily) correct for linkrev aliases
637 577 return self._adjustlinkrev(self._descendantrev)
638 578 else:
639 579 return self._filelog.linkrev(self._filerev)
640 580
641 581 @propertycache
642 582 def _filenode(self):
643 583 if r'_fileid' in self.__dict__:
644 584 return self._filelog.lookup(self._fileid)
645 585 else:
646 586 return self._changectx.filenode(self._path)
647 587
648 588 @propertycache
649 589 def _filerev(self):
650 590 return self._filelog.rev(self._filenode)
651 591
652 592 @propertycache
653 593 def _repopath(self):
654 594 return self._path
655 595
656 596 def __nonzero__(self):
657 597 try:
658 598 self._filenode
659 599 return True
660 600 except error.LookupError:
661 601 # file is missing
662 602 return False
663 603
664 604 __bool__ = __nonzero__
665 605
666 606 def __bytes__(self):
667 607 try:
668 608 return "%s@%s" % (self.path(), self._changectx)
669 609 except error.LookupError:
670 610 return "%s@???" % self.path()
671 611
672 612 __str__ = encoding.strmethod(__bytes__)
673 613
674 614 def __repr__(self):
675 615 return r"<%s %s>" % (type(self).__name__, str(self))
676 616
677 617 def __hash__(self):
678 618 try:
679 619 return hash((self._path, self._filenode))
680 620 except AttributeError:
681 621 return id(self)
682 622
683 623 def __eq__(self, other):
684 624 try:
685 625 return (type(self) == type(other) and self._path == other._path
686 626 and self._filenode == other._filenode)
687 627 except AttributeError:
688 628 return False
689 629
690 630 def __ne__(self, other):
691 631 return not (self == other)
692 632
693 633 def filerev(self):
694 634 return self._filerev
695 635 def filenode(self):
696 636 return self._filenode
697 637 @propertycache
698 638 def _flags(self):
699 639 return self._changectx.flags(self._path)
700 640 def flags(self):
701 641 return self._flags
702 642 def filelog(self):
703 643 return self._filelog
704 644 def rev(self):
705 645 return self._changeid
706 646 def linkrev(self):
707 647 return self._filelog.linkrev(self._filerev)
708 648 def node(self):
709 649 return self._changectx.node()
710 650 def hex(self):
711 651 return self._changectx.hex()
712 652 def user(self):
713 653 return self._changectx.user()
714 654 def date(self):
715 655 return self._changectx.date()
716 656 def files(self):
717 657 return self._changectx.files()
718 658 def description(self):
719 659 return self._changectx.description()
720 660 def branch(self):
721 661 return self._changectx.branch()
722 662 def extra(self):
723 663 return self._changectx.extra()
724 664 def phase(self):
725 665 return self._changectx.phase()
726 666 def phasestr(self):
727 667 return self._changectx.phasestr()
728 668 def obsolete(self):
729 669 return self._changectx.obsolete()
730 670 def instabilities(self):
731 671 return self._changectx.instabilities()
732 672 def manifest(self):
733 673 return self._changectx.manifest()
734 674 def changectx(self):
735 675 return self._changectx
736 676 def renamed(self):
737 677 return self._copied
738 678 def repo(self):
739 679 return self._repo
740 680 def size(self):
741 681 return len(self.data())
742 682
743 683 def path(self):
744 684 return self._path
745 685
746 686 def isbinary(self):
747 687 try:
748 688 return stringutil.binary(self.data())
749 689 except IOError:
750 690 return False
751 691 def isexec(self):
752 692 return 'x' in self.flags()
753 693 def islink(self):
754 694 return 'l' in self.flags()
755 695
756 696 def isabsent(self):
757 697 """whether this filectx represents a file not in self._changectx
758 698
759 699 This is mainly for merge code to detect change/delete conflicts. This is
760 700 expected to be True for all subclasses of basectx."""
761 701 return False
762 702
763 703 _customcmp = False
764 704 def cmp(self, fctx):
765 705 """compare with other file context
766 706
767 707 returns True if different than fctx.
768 708 """
769 709 if fctx._customcmp:
770 710 return fctx.cmp(self)
771 711
772 712 if (fctx._filenode is None
773 713 and (self._repo._encodefilterpats
774 714 # if file data starts with '\1\n', empty metadata block is
775 715 # prepended, which adds 4 bytes to filelog.size().
776 716 or self.size() - 4 == fctx.size())
777 717 or self.size() == fctx.size()):
778 718 return self._filelog.cmp(self._filenode, fctx.data())
779 719
780 720 return True
781 721
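# Illustrative sketch (not part of this commit): the size arithmetic in
# cmp(). Filelog data beginning with '\1\n' gets an empty 4-byte metadata
# block prepended, so a stored revision may legitimately be 4 bytes larger
# than the same content in the working copy. The fast-path guard in
# isolation, before paying for a full content comparison:

def _sizes_may_match_sketch(stored_size, other_size, encodefilterpats=False):
    return (encodefilterpats
            or stored_size - 4 == other_size
            or stored_size == other_size)

# usage: _sizes_may_match_sketch(14, 10) -> True (possible metadata block)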
782 722 def _adjustlinkrev(self, srcrev, inclusive=False):
783 723 """return the first ancestor of <srcrev> introducing <fnode>
784 724
785 725 If the linkrev of the file revision does not point to an ancestor of
786 726 srcrev, we'll walk down the ancestors until we find one introducing
787 727 this file revision.
788 728
789 729 :srcrev: the changeset revision we search ancestors from
790 730 :inclusive: if true, the src revision will also be checked
791 731 """
792 732 repo = self._repo
793 733 cl = repo.unfiltered().changelog
794 734 mfl = repo.manifestlog
795 735 # fetch the linkrev
796 736 lkr = self.linkrev()
797 737 # hack to reuse ancestor computation when searching for renames
798 738 memberanc = getattr(self, '_ancestrycontext', None)
799 739 iteranc = None
800 740 if srcrev is None:
801 741 # wctx case, used by workingfilectx during mergecopy
802 742 revs = [p.rev() for p in self._repo[None].parents()]
803 743 inclusive = True # we skipped the real (revless) source
804 744 else:
805 745 revs = [srcrev]
806 746 if memberanc is None:
807 747 memberanc = iteranc = cl.ancestors(revs, lkr,
808 748 inclusive=inclusive)
809 749 # check if this linkrev is an ancestor of srcrev
810 750 if lkr not in memberanc:
811 751 if iteranc is None:
812 752 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
813 753 fnode = self._filenode
814 754 path = self._path
815 755 for a in iteranc:
816 756 ac = cl.read(a) # get changeset data (we avoid object creation)
817 757 if path in ac[3]: # checking the 'files' field.
818 758 # The file has been touched, check if the content is
819 759 # similar to the one we search for.
820 760 if fnode == mfl[ac[0]].readfast().get(path):
821 761 return a
822 762 # In theory, we should never get out of that loop without a result.
823 763 # But if a manifest uses a buggy file revision (not a child of the
824 764 # one it replaces) we could. Such a buggy situation will likely
825 765 # result in a crash somewhere else at some point.
826 766 return lkr
827 767
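# Illustrative sketch (not part of this commit): the ancestor walk above,
# reduced to plain data. Given ancestors of srcrev (newest first), manifests
# as rev -> {path: filenode} dicts, and the filenode we hold, return the
# first changeset that actually introduced it (names hypothetical; the real
# code first checks the changeset's 'files' field to avoid manifest reads):

def _adjustlinkrev_sketch(ancestors, manifests, path, fnode, lkr):
    for rev in ancestors:
        if manifests[rev].get(path) == fnode:
            return rev                # this changeset introduced fnode
    return lkr                        # buggy-history fallback, as above

# usage:
#   mfs = {2: {'f': 'n1'}, 1: {'f': 'n0'}, 0: {}}
#   _adjustlinkrev_sketch([2, 1, 0], mfs, 'f', 'n1', lkr=9) -> 2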
828 768 def introrev(self):
829 769 """return the rev of the changeset which introduced this file revision
830 770
831 771 This method is different from linkrev because it takes into account the
832 772 changeset the filectx was created from. It ensures the returned
833 773 revision is one of its ancestors. This prevents bugs from
834 774 'linkrev-shadowing' when a file revision is used by multiple
835 775 changesets.
836 776 """
837 777 lkr = self.linkrev()
838 778 attrs = vars(self)
839 779 noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
840 780 if noctx or self.rev() == lkr:
841 781 return self.linkrev()
842 782 return self._adjustlinkrev(self.rev(), inclusive=True)
843 783
844 784 def introfilectx(self):
845 785 """Return filectx having identical contents, but pointing to the
846 786 changeset revision where this filectx was introduced"""
847 787 introrev = self.introrev()
848 788 if self.rev() == introrev:
849 789 return self
850 790 return self.filectx(self.filenode(), changeid=introrev)
851 791
852 792 def _parentfilectx(self, path, fileid, filelog):
853 793 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
854 794 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
855 795 if r'_changeid' in vars(self) or r'_changectx' in vars(self):
856 796 # If self is associated with a changeset (probably explicitly
857 797 # fed), ensure the created filectx is associated with a
858 798 # changeset that is an ancestor of self.changectx.
859 799 # This lets us later use _adjustlinkrev to get a correct link.
860 800 fctx._descendantrev = self.rev()
861 801 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
862 802 elif r'_descendantrev' in vars(self):
863 803 # Otherwise propagate _descendantrev if we have one associated.
864 804 fctx._descendantrev = self._descendantrev
865 805 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
866 806 return fctx
867 807
868 808 def parents(self):
869 809 _path = self._path
870 810 fl = self._filelog
871 811 parents = self._filelog.parents(self._filenode)
872 812 pl = [(_path, node, fl) for node in parents if node != nullid]
873 813
874 814 r = fl.renamed(self._filenode)
875 815 if r:
876 816 # - In the simple rename case, both parents are nullid, pl is empty.
877 817 # - In case of merge, only one of the parents is nullid and should
878 818 # be replaced with the rename information. This parent is -always-
879 819 # the first one.
880 820 #
881 821 # As nullid parents have always been filtered out in the previous list
882 822 # comprehension, inserting at 0 will always result in replacing the
883 823 # first nullid parent with the rename information.
884 824 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
885 825
886 826 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
887 827
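# Illustrative sketch (not part of this commit): the rename handling above
# in isolation. Nullid parents are filtered out first, so when rename
# metadata exists it is inserted at position 0 and always becomes the first
# parent (names hypothetical):

def _fileparents_sketch(p1, p2, copysource=None, nullid_=None):
    pl = [p for p in (p1, p2) if p != nullid_]
    if copysource is not None:
        pl.insert(0, copysource)      # rename info replaces a nullid parent
    return pl

# usage: _fileparents_sketch(None, 'n2', copysource=('src', 'n0'))
#        -> [('src', 'n0'), 'n2']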
888 828 def p1(self):
889 829 return self.parents()[0]
890 830
891 831 def p2(self):
892 832 p = self.parents()
893 833 if len(p) == 2:
894 834 return p[1]
895 835 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
896 836
897 837 def annotate(self, follow=False, skiprevs=None, diffopts=None):
898 838 """Returns a list of annotateline objects for each line in the file
899 839
900 840 - line.fctx is the filectx of the node where that line was last changed
901 841 - line.lineno is the line number at the first appearance in the managed
902 842 file
903 843 - line.text is the data on that line (including newline character)
904 844 """
905 845 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
906 846
907 847 def parents(f):
908 848 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
909 849 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
910 850 # from the topmost introrev (= srcrev) down to p.linkrev() if it
911 851 # isn't an ancestor of the srcrev.
912 852 f._changeid
913 853 pl = f.parents()
914 854
915 855 # Don't return renamed parents if we aren't following.
916 856 if not follow:
917 857 pl = [p for p in pl if p.path() == f.path()]
918 858
919 859 # renamed filectx won't have a filelog yet, so set it
920 860 # from the cache to save time
921 861 for p in pl:
922 862 if not r'_filelog' in p.__dict__:
923 863 p._filelog = getlog(p.path())
924 864
925 865 return pl
926 866
927 867 # use linkrev to find the first changeset where self appeared
928 868 base = self.introfilectx()
929 869 if getattr(base, '_ancestrycontext', None) is None:
930 870 cl = self._repo.changelog
931 871 if base.rev() is None:
932 872 # wctx is not inclusive, but works because _ancestrycontext
933 873 # is used to test filelog revisions
934 874 ac = cl.ancestors([p.rev() for p in base.parents()],
935 875 inclusive=True)
936 876 else:
937 877 ac = cl.ancestors([base.rev()], inclusive=True)
938 878 base._ancestrycontext = ac
939 879
940 880 return dagop.annotate(base, parents, skiprevs=skiprevs,
941 881 diffopts=diffopts)
942 882
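# Illustrative sketch (not part of this commit): hypothetical annotate()
# usage, assuming an already-open `repo`:
#
#     fctx = repo['tip']['mercurial/context.py']
#     for line in fctx.annotate(follow=True):
#         # line.fctx:   filectx where the line last changed
#         # line.lineno: line number at first appearance
#         # line.text:   line content, including the newline
#         print(line.fctx.rev(), line.text)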
943 883 def ancestors(self, followfirst=False):
944 884 visit = {}
945 885 c = self
946 886 if followfirst:
947 887 cut = 1
948 888 else:
949 889 cut = None
950 890
951 891 while True:
952 892 for parent in c.parents()[:cut]:
953 893 visit[(parent.linkrev(), parent.filenode())] = parent
954 894 if not visit:
955 895 break
956 896 c = visit.pop(max(visit))
957 897 yield c
958 898
959 899 def decodeddata(self):
960 900 """Returns `data()` after running repository decoding filters.
961 901
962 902 This is often equivalent to how the data would be expressed on disk.
963 903 """
964 904 return self._repo.wwritedata(self.path(), self.data())
965 905
966 906 class filectx(basefilectx):
967 907 """A filecontext object makes access to data related to a particular
968 908 filerevision convenient."""
969 909 def __init__(self, repo, path, changeid=None, fileid=None,
970 910 filelog=None, changectx=None):
971 911 """changeid can be a changeset revision, node, or tag.
972 912 fileid can be a file revision or node."""
973 913 self._repo = repo
974 914 self._path = path
975 915
976 916 assert (changeid is not None
977 917 or fileid is not None
978 918 or changectx is not None), \
979 919 ("bad args: changeid=%r, fileid=%r, changectx=%r"
980 920 % (changeid, fileid, changectx))
981 921
982 922 if filelog is not None:
983 923 self._filelog = filelog
984 924
985 925 if changeid is not None:
986 926 self._changeid = changeid
987 927 if changectx is not None:
988 928 self._changectx = changectx
989 929 if fileid is not None:
990 930 self._fileid = fileid
991 931
992 932 @propertycache
993 933 def _changectx(self):
994 934 try:
995 935 return self._repo[self._changeid]
996 936 except error.FilteredRepoLookupError:
997 937 # Linkrev may point to any revision in the repository. When the
998 938 # repository is filtered this may lead to `filectx` trying to build
999 939 # `changectx` for a filtered revision. In such a case we fall back to
1000 940 # creating `changectx` on the unfiltered version of the repository.
1001 941 # This fallback should not be an issue because `changectx` from
1002 942 # `filectx` are not used in complex operations that care about
1003 943 # filtering.
1004 944 #
1005 945 # This fallback is a cheap and dirty fix that prevents several
1006 946 # crashes. It does not ensure the behavior is correct. However the
1007 947 # behavior was not correct before filtering either, and "incorrect
1008 948 # behavior" is seen as better than a crash.
1009 949 #
1010 950 # Linkrevs have several serious problems with filtering that are
1011 951 # complicated to solve. Proper handling of the issue here should be
1012 952 # considered when solutions to the linkrev issue are on the table.
1013 953 return self._repo.unfiltered()[self._changeid]
1014 954
1015 955 def filectx(self, fileid, changeid=None):
1016 956 '''opens an arbitrary revision of the file without
1017 957 opening a new filelog'''
1018 958 return filectx(self._repo, self._path, fileid=fileid,
1019 959 filelog=self._filelog, changeid=changeid)
1020 960
1021 961 def rawdata(self):
1022 962 return self._filelog.revision(self._filenode, raw=True)
1023 963
1024 964 def rawflags(self):
1025 965 """low-level revlog flags"""
1026 966 return self._filelog.flags(self._filerev)
1027 967
1028 968 def data(self):
1029 969 try:
1030 970 return self._filelog.read(self._filenode)
1031 971 except error.CensoredNodeError:
1032 972 if self._repo.ui.config("censor", "policy") == "ignore":
1033 973 return ""
1034 974 raise error.Abort(_("censored node: %s") % short(self._filenode),
1035 975 hint=_("set censor.policy to ignore errors"))
1036 976
1037 977 def size(self):
1038 978 return self._filelog.size(self._filerev)
1039 979
1040 980 @propertycache
1041 981 def _copied(self):
1042 982 """check if file was actually renamed in this changeset revision
1043 983
1044 984 If a rename is logged in the file revision, we report the copy for the
1045 985 changeset only if the file revision's linkrev points back to the changeset
1046 986 in question or both changeset parents contain different file revisions.
1047 987 """
1048 988
1049 989 renamed = self._filelog.renamed(self._filenode)
1050 990 if not renamed:
1051 991 return None
1052 992
1053 993 if self.rev() == self.linkrev():
1054 994 return renamed
1055 995
1056 996 name = self.path()
1057 997 fnode = self._filenode
1058 998 for p in self._changectx.parents():
1059 999 try:
1060 1000 if fnode == p.filenode(name):
1061 1001 return None
1062 1002 except error.LookupError:
1063 1003 pass
1064 1004 return renamed
1065 1005
1066 1006 def children(self):
1067 1007 # hard for renames
1068 1008 c = self._filelog.children(self._filenode)
1069 1009 return [filectx(self._repo, self._path, fileid=x,
1070 1010 filelog=self._filelog) for x in c]
1071 1011
1072 1012 class committablectx(basectx):
1073 1013 """A committablectx object provides common functionality for a context that
1074 1014 wants the ability to commit, e.g. workingctx or memctx."""
1075 1015 def __init__(self, repo, text="", user=None, date=None, extra=None,
1076 1016 changes=None):
1077 1017 super(committablectx, self).__init__(repo)
1078 1018 self._rev = None
1079 1019 self._node = None
1080 1020 self._text = text
1081 1021 if date:
1082 1022 self._date = dateutil.parsedate(date)
1083 1023 if user:
1084 1024 self._user = user
1085 1025 if changes:
1086 1026 self._status = changes
1087 1027
1088 1028 self._extra = {}
1089 1029 if extra:
1090 1030 self._extra = extra.copy()
1091 1031 if 'branch' not in self._extra:
1092 1032 try:
1093 1033 branch = encoding.fromlocal(self._repo.dirstate.branch())
1094 1034 except UnicodeDecodeError:
1095 1035 raise error.Abort(_('branch name not in UTF-8!'))
1096 1036 self._extra['branch'] = branch
1097 1037 if self._extra['branch'] == '':
1098 1038 self._extra['branch'] = 'default'
1099 1039
1100 1040 def __bytes__(self):
1101 1041 return bytes(self._parents[0]) + "+"
1102 1042
1103 1043 __str__ = encoding.strmethod(__bytes__)
1104 1044
1105 1045 def __nonzero__(self):
1106 1046 return True
1107 1047
1108 1048 __bool__ = __nonzero__
1109 1049
1110 1050 def _buildflagfunc(self):
1111 1051 # Create a fallback function for getting file flags when the
1112 1052 # filesystem doesn't support them
1113 1053
1114 1054 copiesget = self._repo.dirstate.copies().get
1115 1055 parents = self.parents()
1116 1056 if len(parents) < 2:
1117 1057 # when we have one parent, it's easy: copy from parent
1118 1058 man = parents[0].manifest()
1119 1059 def func(f):
1120 1060 f = copiesget(f, f)
1121 1061 return man.flags(f)
1122 1062 else:
1123 1063 # merges are tricky: we try to reconstruct the unstored
1124 1064 # result from the merge (issue1802)
1125 1065 p1, p2 = parents
1126 1066 pa = p1.ancestor(p2)
1127 1067 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1128 1068
1129 1069 def func(f):
1130 1070 f = copiesget(f, f) # may be wrong for merges with copies
1131 1071 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1132 1072 if fl1 == fl2:
1133 1073 return fl1
1134 1074 if fl1 == fla:
1135 1075 return fl2
1136 1076 if fl2 == fla:
1137 1077 return fl1
1138 1078 return '' # punt for conflicts
1139 1079
1140 1080 return func
1141 1081
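# Illustrative sketch (not part of this commit): the flag reconstruction for
# merges (issue1802) in isolation. Keep whichever side changed the flag
# relative to the ancestor, and punt with '' when both sides changed it
# differently:

def _mergeflags_sketch(fl1, fl2, fla):
    if fl1 == fl2:
        return fl1                    # parents agree
    if fl1 == fla:
        return fl2                    # only p2 changed the flag
    if fl2 == fla:
        return fl1                    # only p1 changed the flag
    return ''                         # conflicting changes: punt

# usage: _mergeflags_sketch('x', '', '') -> 'x'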
1142 1082 @propertycache
1143 1083 def _flagfunc(self):
1144 1084 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1145 1085
1146 1086 @propertycache
1147 1087 def _status(self):
1148 1088 return self._repo.status()
1149 1089
1150 1090 @propertycache
1151 1091 def _user(self):
1152 1092 return self._repo.ui.username()
1153 1093
1154 1094 @propertycache
1155 1095 def _date(self):
1156 1096 ui = self._repo.ui
1157 1097 date = ui.configdate('devel', 'default-date')
1158 1098 if date is None:
1159 1099 date = dateutil.makedate()
1160 1100 return date
1161 1101
1162 1102 def subrev(self, subpath):
1163 1103 return None
1164 1104
1165 1105 def manifestnode(self):
1166 1106 return None
1167 1107 def user(self):
1168 1108 return self._user or self._repo.ui.username()
1169 1109 def date(self):
1170 1110 return self._date
1171 1111 def description(self):
1172 1112 return self._text
1173 1113 def files(self):
1174 1114 return sorted(self._status.modified + self._status.added +
1175 1115 self._status.removed)
1176 1116
1177 1117 def modified(self):
1178 1118 return self._status.modified
1179 1119 def added(self):
1180 1120 return self._status.added
1181 1121 def removed(self):
1182 1122 return self._status.removed
1183 1123 def deleted(self):
1184 1124 return self._status.deleted
1185 1125 def branch(self):
1186 1126 return encoding.tolocal(self._extra['branch'])
1187 1127 def closesbranch(self):
1188 1128 return 'close' in self._extra
1189 1129 def extra(self):
1190 1130 return self._extra
1191 1131
1192 1132 def isinmemory(self):
1193 1133 return False
1194 1134
1195 1135 def tags(self):
1196 1136 return []
1197 1137
1198 1138 def bookmarks(self):
1199 1139 b = []
1200 1140 for p in self.parents():
1201 1141 b.extend(p.bookmarks())
1202 1142 return b
1203 1143
1204 1144 def phase(self):
1205 1145 phase = phases.draft # default phase to draft
1206 1146 for p in self.parents():
1207 1147 phase = max(phase, p.phase())
1208 1148 return phase
1209 1149
1210 1150 def hidden(self):
1211 1151 return False
1212 1152
1213 1153 def children(self):
1214 1154 return []
1215 1155
1216 1156 def flags(self, path):
1217 1157 if r'_manifest' in self.__dict__:
1218 1158 try:
1219 1159 return self._manifest.flags(path)
1220 1160 except KeyError:
1221 1161 return ''
1222 1162
1223 1163 try:
1224 1164 return self._flagfunc(path)
1225 1165 except OSError:
1226 1166 return ''
1227 1167
1228 1168 def ancestor(self, c2):
1229 1169 """return the "best" ancestor context of self and c2"""
1230 1170 return self._parents[0].ancestor(c2) # punt on two parents for now
1231 1171
1232 1172 def walk(self, match):
1233 1173 '''Generates matching file names.'''
1234 1174 return sorted(self._repo.dirstate.walk(match,
1235 1175 subrepos=sorted(self.substate),
1236 1176 unknown=True, ignored=False))
1237 1177
1238 1178 def matches(self, match):
1239 1179 ds = self._repo.dirstate
1240 1180 return sorted(f for f in ds.matches(match) if ds[f] != 'r')
1241 1181
1242 1182 def ancestors(self):
1243 1183 for p in self._parents:
1244 1184 yield p
1245 1185 for a in self._repo.changelog.ancestors(
1246 1186 [p.rev() for p in self._parents]):
1247 1187 yield self._repo[a]
1248 1188
1249 1189 def markcommitted(self, node):
1250 1190 """Perform post-commit cleanup necessary after committing this ctx
1251 1191
1252 1192 Specifically, this updates backing stores this working context
1253 1193 wraps to reflect the fact that the changes reflected by this
1254 1194 workingctx have been committed. For example, it marks
1255 1195 modified and added files as normal in the dirstate.
1256 1196
1257 1197 """
1258 1198
1259 1199 with self._repo.dirstate.parentchange():
1260 1200 for f in self.modified() + self.added():
1261 1201 self._repo.dirstate.normal(f)
1262 1202 for f in self.removed():
1263 1203 self._repo.dirstate.drop(f)
1264 1204 self._repo.dirstate.setparents(node)
1265 1205
1266 1206 # write changes out explicitly, because nesting wlock at
1267 1207 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1268 1208 # from immediately doing so for subsequent changing files
1269 1209 self._repo.dirstate.write(self._repo.currenttransaction())
1270 1210
1271 1211 def dirty(self, missing=False, merge=True, branch=True):
1272 1212 return False
1273 1213
1274 1214 class workingctx(committablectx):
1275 1215 """A workingctx object makes access to data related to
1276 1216 the current working directory convenient.
1277 1217 date - any valid date string or (unixtime, offset), or None.
1278 1218 user - username string, or None.
1279 1219 extra - a dictionary of extra values, or None.
1280 1220 changes - a list of file lists as returned by localrepo.status()
1281 1221 or None to use the repository status.
1282 1222 """
1283 1223 def __init__(self, repo, text="", user=None, date=None, extra=None,
1284 1224 changes=None):
1285 1225 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1286 1226
1287 1227 def __iter__(self):
1288 1228 d = self._repo.dirstate
1289 1229 for f in d:
1290 1230 if d[f] != 'r':
1291 1231 yield f
1292 1232
1293 1233 def __contains__(self, key):
1294 1234 return self._repo.dirstate[key] not in "?r"
1295 1235
1296 1236 def hex(self):
1297 1237 return hex(wdirid)
1298 1238
1299 1239 @propertycache
1300 1240 def _parents(self):
1301 1241 p = self._repo.dirstate.parents()
1302 1242 if p[1] == nullid:
1303 1243 p = p[:-1]
1304 1244 return [self._repo[x] for x in p]
1305 1245
1306 1246 def _fileinfo(self, path):
1307 1247 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1308 1248 self._manifest
1309 1249 return super(workingctx, self)._fileinfo(path)
1310 1250
1311 1251 def filectx(self, path, filelog=None):
1312 1252 """get a file context from the working directory"""
1313 1253 return workingfilectx(self._repo, path, workingctx=self,
1314 1254 filelog=filelog)
1315 1255
1316 1256 def dirty(self, missing=False, merge=True, branch=True):
1317 1257 "check whether a working directory is modified"
1318 1258 # check subrepos first
1319 1259 for s in sorted(self.substate):
1320 1260 if self.sub(s).dirty(missing=missing):
1321 1261 return True
1322 1262 # check current working dir
1323 1263 return ((merge and self.p2()) or
1324 1264 (branch and self.branch() != self.p1().branch()) or
1325 1265 self.modified() or self.added() or self.removed() or
1326 1266 (missing and self.deleted()))
1327 1267
1328 1268 def add(self, list, prefix=""):
1329 1269 with self._repo.wlock():
1330 1270 ui, ds = self._repo.ui, self._repo.dirstate
1331 1271 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1332 1272 rejected = []
1333 1273 lstat = self._repo.wvfs.lstat
1334 1274 for f in list:
1335 1275 # ds.pathto() returns an absolute file when this is invoked from
1336 1276 # the keyword extension. That gets flagged as non-portable on
1337 1277 # Windows, since it contains the drive letter and colon.
1338 1278 scmutil.checkportable(ui, os.path.join(prefix, f))
1339 1279 try:
1340 1280 st = lstat(f)
1341 1281 except OSError:
1342 1282 ui.warn(_("%s does not exist!\n") % uipath(f))
1343 1283 rejected.append(f)
1344 1284 continue
1345 1285 limit = ui.configbytes('ui', 'large-file-limit')
1346 1286 if limit != 0 and st.st_size > limit:
1347 1287 ui.warn(_("%s: up to %d MB of RAM may be required "
1348 1288 "to manage this file\n"
1349 1289 "(use 'hg revert %s' to cancel the "
1350 1290 "pending addition)\n")
1351 1291 % (f, 3 * st.st_size // 1000000, uipath(f)))
1352 1292 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1353 1293 ui.warn(_("%s not added: only files and symlinks "
1354 1294 "supported currently\n") % uipath(f))
1355 1295 rejected.append(f)
1356 1296 elif ds[f] in 'amn':
1357 1297 ui.warn(_("%s already tracked!\n") % uipath(f))
1358 1298 elif ds[f] == 'r':
1359 1299 ds.normallookup(f)
1360 1300 else:
1361 1301 ds.add(f)
1362 1302 return rejected
1363 1303
1364 1304 def forget(self, files, prefix=""):
1365 1305 with self._repo.wlock():
1366 1306 ds = self._repo.dirstate
1367 1307 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1368 1308 rejected = []
1369 1309 for f in files:
1370 1310 if f not in self._repo.dirstate:
1371 1311 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1372 1312 rejected.append(f)
1373 1313 elif self._repo.dirstate[f] != 'a':
1374 1314 self._repo.dirstate.remove(f)
1375 1315 else:
1376 1316 self._repo.dirstate.drop(f)
1377 1317 return rejected
1378 1318
1379 1319 def undelete(self, list):
1380 1320 pctxs = self.parents()
1381 1321 with self._repo.wlock():
1382 1322 ds = self._repo.dirstate
1383 1323 for f in list:
1384 1324 if self._repo.dirstate[f] != 'r':
1385 1325 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1386 1326 else:
1387 1327 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1388 1328 t = fctx.data()
1389 1329 self._repo.wwrite(f, t, fctx.flags())
1390 1330 self._repo.dirstate.normal(f)
1391 1331
1392 1332 def copy(self, source, dest):
1393 1333 try:
1394 1334 st = self._repo.wvfs.lstat(dest)
1395 1335 except OSError as err:
1396 1336 if err.errno != errno.ENOENT:
1397 1337 raise
1398 1338 self._repo.ui.warn(_("%s does not exist!\n")
1399 1339 % self._repo.dirstate.pathto(dest))
1400 1340 return
1401 1341 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1402 1342 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1403 1343 "symbolic link\n")
1404 1344 % self._repo.dirstate.pathto(dest))
1405 1345 else:
1406 1346 with self._repo.wlock():
1407 1347 if self._repo.dirstate[dest] in '?':
1408 1348 self._repo.dirstate.add(dest)
1409 1349 elif self._repo.dirstate[dest] in 'r':
1410 1350 self._repo.dirstate.normallookup(dest)
1411 1351 self._repo.dirstate.copy(source, dest)
1412 1352
1413 1353 def match(self, pats=None, include=None, exclude=None, default='glob',
1414 1354 listsubrepos=False, badfn=None):
1415 1355 r = self._repo
1416 1356
1417 1357 # Only a case insensitive filesystem needs magic to translate user input
1418 1358 # to actual case in the filesystem.
1419 1359 icasefs = not util.fscasesensitive(r.root)
1420 1360 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1421 1361 default, auditor=r.auditor, ctx=self,
1422 1362 listsubrepos=listsubrepos, badfn=badfn,
1423 1363 icasefs=icasefs)
1424 1364
1425 1365 def _filtersuspectsymlink(self, files):
1426 1366 if not files or self._repo.dirstate._checklink:
1427 1367 return files
1428 1368
1429 1369 # Symlink placeholders may get non-symlink-like contents
1430 1370 # via user error or dereferencing by NFS or Samba servers,
1431 1371 # so we filter out any placeholders that don't look like a
1432 1372 # symlink
1433 1373 sane = []
1434 1374 for f in files:
1435 1375 if self.flags(f) == 'l':
1436 1376 d = self[f].data()
1437 1377 if (d == '' or len(d) >= 1024 or '\n' in d
1438 1378 or stringutil.binary(d)):
1439 1379 self._repo.ui.debug('ignoring suspect symlink placeholder'
1440 1380 ' "%s"\n' % f)
1441 1381 continue
1442 1382 sane.append(f)
1443 1383 return sane
1444 1384
1445 1385 def _checklookup(self, files):
1446 1386 # check for any possibly clean files
1447 1387 if not files:
1448 1388 return [], [], []
1449 1389
1450 1390 modified = []
1451 1391 deleted = []
1452 1392 fixup = []
1453 1393 pctx = self._parents[0]
1454 1394 # do a full compare of any files that might have changed
1455 1395 for f in sorted(files):
1456 1396 try:
1457 1397 # This will return True for a file that got replaced by a
1458 1398 # directory in the interim, but fixing that is pretty hard.
1459 1399 if (f not in pctx or self.flags(f) != pctx.flags(f)
1460 1400 or pctx[f].cmp(self[f])):
1461 1401 modified.append(f)
1462 1402 else:
1463 1403 fixup.append(f)
1464 1404 except (IOError, OSError):
1465 1405 # A file became inaccessible in between? Mark it as deleted,
1466 1406 # matching dirstate behavior (issue5584).
1467 1407 # The dirstate has more complex behavior around whether a
1468 1408 # missing file matches a directory, etc, but we don't need to
1469 1409 # bother with that: if f has made it to this point, we're sure
1470 1410 # it's in the dirstate.
1471 1411 deleted.append(f)
1472 1412
1473 1413 return modified, deleted, fixup
1474 1414
1475 1415 def _poststatusfixup(self, status, fixup):
1476 1416 """update dirstate for files that are actually clean"""
1477 1417 poststatus = self._repo.postdsstatus()
1478 1418 if fixup or poststatus:
1479 1419 try:
1480 1420 oldid = self._repo.dirstate.identity()
1481 1421
1482 1422 # updating the dirstate is optional
1483 1423 # so we don't wait on the lock
1484 1424 # wlock can invalidate the dirstate, so cache normal _after_
1485 1425 # taking the lock
1486 1426 with self._repo.wlock(False):
1487 1427 if self._repo.dirstate.identity() == oldid:
1488 1428 if fixup:
1489 1429 normal = self._repo.dirstate.normal
1490 1430 for f in fixup:
1491 1431 normal(f)
1492 1432 # write changes out explicitly, because nesting
1493 1433 # wlock at runtime may prevent 'wlock.release()'
1494 1434 # after this block from doing so for subsequent
1495 1435 # changing files
1496 1436 tr = self._repo.currenttransaction()
1497 1437 self._repo.dirstate.write(tr)
1498 1438
1499 1439 if poststatus:
1500 1440 for ps in poststatus:
1501 1441 ps(self, status)
1502 1442 else:
1503 1443 # in this case, writing changes out breaks
1504 1444 # consistency, because .hg/dirstate was
1505 1445 # already changed simultaneously after last
1506 1446 # caching (see also issue5584 for detail)
1507 1447 self._repo.ui.debug('skip updating dirstate: '
1508 1448 'identity mismatch\n')
1509 1449 except error.LockError:
1510 1450 pass
1511 1451 finally:
1512 1452 # Even if the wlock couldn't be grabbed, clear out the list.
1513 1453 self._repo.clearpostdsstatus()
1514 1454
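# Illustrative sketch (not part of this commit): the identity dance above,
# reduced to its shape. Snapshot the dirstate identity before waiting on
# wlock, and only write back if nothing else changed the dirstate in the
# meantime (issue5584). Names hypothetical:

def _writeback_if_unchanged_sketch(identity_before, identity_now, write):
    if identity_now == identity_before:
        write()                       # safe: no concurrent dirstate update
        return True
    return False                      # skip write to avoid clobbering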
1515 1455 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1516 1456 '''Gets the status from the dirstate -- internal use only.'''
1517 1457 subrepos = []
1518 1458 if '.hgsub' in self:
1519 1459 subrepos = sorted(self.substate)
1520 1460 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1521 1461 clean=clean, unknown=unknown)
1522 1462
1523 1463 # check for any possibly clean files
1524 1464 fixup = []
1525 1465 if cmp:
1526 1466 modified2, deleted2, fixup = self._checklookup(cmp)
1527 1467 s.modified.extend(modified2)
1528 1468 s.deleted.extend(deleted2)
1529 1469
1530 1470 if fixup and clean:
1531 1471 s.clean.extend(fixup)
1532 1472
1533 1473 self._poststatusfixup(s, fixup)
1534 1474
1535 1475 if match.always():
1536 1476 # cache for performance
1537 1477 if s.unknown or s.ignored or s.clean:
1538 1478 # "_status" is cached with list*=False in the normal route
1539 1479 self._status = scmutil.status(s.modified, s.added, s.removed,
1540 1480 s.deleted, [], [], [])
1541 1481 else:
1542 1482 self._status = s
1543 1483
1544 1484 return s
1545 1485
1546 1486 @propertycache
1547 1487 def _manifest(self):
1548 1488 """generate a manifest corresponding to the values in self._status
1549 1489
1550 1490 This reuses the file nodeids from the parent, but uses special node
1551 1491 identifiers for added and modified files. This is used by manifest
1552 1492 merge to see that files are different and by update logic to avoid
1553 1493 deleting newly added files.
1554 1494 """
1555 1495 return self._buildstatusmanifest(self._status)
1556 1496
1557 1497 def _buildstatusmanifest(self, status):
1558 1498 """Builds a manifest that includes the given status results."""
1559 1499 parents = self.parents()
1560 1500
1561 1501 man = parents[0].manifest().copy()
1562 1502
1563 1503 ff = self._flagfunc
1564 1504 for i, l in ((addednodeid, status.added),
1565 1505 (modifiednodeid, status.modified)):
1566 1506 for f in l:
1567 1507 man[f] = i
1568 1508 try:
1569 1509 man.setflag(f, ff(f))
1570 1510 except OSError:
1571 1511 pass
1572 1512
1573 1513 for f in status.deleted + status.removed:
1574 1514 if f in man:
1575 1515 del man[f]
1576 1516
1577 1517 return man
1578 1518
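# Illustrative sketch (not part of this commit): _buildstatusmanifest()
# above, over plain dicts. Start from the parent manifest, stamp
# added/modified files with sentinel nodeids (standing in for addednodeid
# and modifiednodeid), and drop deleted/removed entries:

ADDED, MODIFIED = object(), object()

def _statusmanifest_sketch(parentman, added, modified, removed_or_deleted):
    man = dict(parentman)
    for f in added:
        man[f] = ADDED
    for f in modified:
        man[f] = MODIFIED
    for f in removed_or_deleted:
        man.pop(f, None)              # gone from the working copy
    return man

# usage: _statusmanifest_sketch({'a': 'n0'}, ['b'], ['a'], [])
#        -> {'a': MODIFIED, 'b': ADDED}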
1579 1519 def _buildstatus(self, other, s, match, listignored, listclean,
1580 1520 listunknown):
1581 1521 """build a status with respect to another context
1582 1522
1583 1523 This includes logic for maintaining the fast path of status when
1584 1524 comparing the working directory against its parent, which is to skip
1585 1525 building a new manifest if self (working directory) is not comparing
1586 1526 against its parent (repo['.']).
1587 1527 """
1588 1528 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1589 1529 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1590 1530 # might have accidentally ended up with the entire contents of the file
1591 1531 # they are supposed to be linking to.
1592 1532 s.modified[:] = self._filtersuspectsymlink(s.modified)
1593 1533 if other != self._repo['.']:
1594 1534 s = super(workingctx, self)._buildstatus(other, s, match,
1595 1535 listignored, listclean,
1596 1536 listunknown)
1597 1537 return s
1598 1538
1599 1539 def _matchstatus(self, other, match):
1600 1540 """override the match method with a filter for directory patterns
1601 1541
1602 1542 We use inheritance to customize the match.bad method only in the case of
1603 1543 workingctx since it belongs only to the working directory when
1604 1544 comparing against the parent changeset.
1605 1545
1606 1546 If we aren't comparing against the working directory's parent, then we
1607 1547 just use the default match object sent to us.
1608 1548 """
1609 1549 if other != self._repo['.']:
1610 1550 def bad(f, msg):
1611 1551 # 'f' may be a directory pattern from 'match.files()',
1612 1552 # so 'f not in ctx1' is not enough
1613 1553 if f not in other and not other.hasdir(f):
1614 1554 self._repo.ui.warn('%s: %s\n' %
1615 1555 (self._repo.dirstate.pathto(f), msg))
1616 1556 match.bad = bad
1617 1557 return match
1618 1558
1619 1559 def markcommitted(self, node):
1620 1560 super(workingctx, self).markcommitted(node)
1621 1561
1622 1562 sparse.aftercommit(self._repo, node)
1623 1563
1624 1564 class committablefilectx(basefilectx):
1625 1565 """A committablefilectx provides common functionality for a file context
1626 1566 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1627 1567 def __init__(self, repo, path, filelog=None, ctx=None):
1628 1568 self._repo = repo
1629 1569 self._path = path
1630 1570 self._changeid = None
1631 1571 self._filerev = self._filenode = None
1632 1572
1633 1573 if filelog is not None:
1634 1574 self._filelog = filelog
1635 1575 if ctx:
1636 1576 self._changectx = ctx
1637 1577
1638 1578 def __nonzero__(self):
1639 1579 return True
1640 1580
1641 1581 __bool__ = __nonzero__
1642 1582
1643 1583 def linkrev(self):
1644 1584 # linked to self._changectx no matter if file is modified or not
1645 1585 return self.rev()
1646 1586
1647 1587 def parents(self):
1648 1588 '''return parent filectxs, following copies if necessary'''
1649 1589 def filenode(ctx, path):
1650 1590 return ctx._manifest.get(path, nullid)
1651 1591
1652 1592 path = self._path
1653 1593 fl = self._filelog
1654 1594 pcl = self._changectx._parents
1655 1595 renamed = self.renamed()
1656 1596
1657 1597 if renamed:
1658 1598 pl = [renamed + (None,)]
1659 1599 else:
1660 1600 pl = [(path, filenode(pcl[0], path), fl)]
1661 1601
1662 1602 for pc in pcl[1:]:
1663 1603 pl.append((path, filenode(pc, path), fl))
1664 1604
1665 1605 return [self._parentfilectx(p, fileid=n, filelog=l)
1666 1606 for p, n, l in pl if n != nullid]
1667 1607
1668 1608 def children(self):
1669 1609 return []
1670 1610
1671 1611 class workingfilectx(committablefilectx):
1672 1612 """A workingfilectx object makes access to data related to a particular
1673 1613 file in the working directory convenient."""
1674 1614 def __init__(self, repo, path, filelog=None, workingctx=None):
1675 1615 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1676 1616
1677 1617 @propertycache
1678 1618 def _changectx(self):
1679 1619 return workingctx(self._repo)
1680 1620
1681 1621 def data(self):
1682 1622 return self._repo.wread(self._path)
1683 1623 def renamed(self):
1684 1624 rp = self._repo.dirstate.copied(self._path)
1685 1625 if not rp:
1686 1626 return None
1687 1627 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1688 1628
1689 1629 def size(self):
1690 1630 return self._repo.wvfs.lstat(self._path).st_size
1691 1631 def date(self):
1692 1632 t, tz = self._changectx.date()
1693 1633 try:
1694 1634 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
1695 1635 except OSError as err:
1696 1636 if err.errno != errno.ENOENT:
1697 1637 raise
1698 1638 return (t, tz)
1699 1639
1700 1640 def exists(self):
1701 1641 return self._repo.wvfs.exists(self._path)
1702 1642
1703 1643 def lexists(self):
1704 1644 return self._repo.wvfs.lexists(self._path)
1705 1645
1706 1646 def audit(self):
1707 1647 return self._repo.wvfs.audit(self._path)
1708 1648
1709 1649 def cmp(self, fctx):
1710 1650 """compare with other file context
1711 1651
1712 1652 returns True if different than fctx.
1713 1653 """
1714 1654 # fctx should be a filectx (not a workingfilectx)
1715 1655 # invert comparison to reuse the same code path
1716 1656 return fctx.cmp(self)
1717 1657
1718 1658 def remove(self, ignoremissing=False):
1719 1659 """wraps unlink for a repo's working directory"""
1720 1660 rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
1721 1661 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
1722 1662 rmdir=rmdir)
1723 1663
1724 1664 def write(self, data, flags, backgroundclose=False, **kwargs):
1725 1665 """wraps repo.wwrite"""
1726 1666 self._repo.wwrite(self._path, data, flags,
1727 1667 backgroundclose=backgroundclose,
1728 1668 **kwargs)
1729 1669
1730 1670 def markcopied(self, src):
1731 1671 """marks this file a copy of `src`"""
1732 1672 if self._repo.dirstate[self._path] in "nma":
1733 1673 self._repo.dirstate.copy(src, self._path)
1734 1674
1735 1675 def clearunknown(self):
1736 1676 """Removes conflicting items in the working directory so that
1737 1677 ``write()`` can be called successfully.
1738 1678 """
1739 1679 wvfs = self._repo.wvfs
1740 1680 f = self._path
1741 1681 wvfs.audit(f)
1742 1682 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1743 1683 # remove files under the directory as they should already have
1744 1684 # been warned about and backed up
1745 1685 if wvfs.isdir(f) and not wvfs.islink(f):
1746 1686 wvfs.rmtree(f, forcibly=True)
1747 1687 for p in reversed(list(util.finddirs(f))):
1748 1688 if wvfs.isfileorlink(p):
1749 1689 wvfs.unlink(p)
1750 1690 break
1751 1691 else:
1752 1692 # don't remove files if path conflicts are not processed
1753 1693 if wvfs.isdir(f) and not wvfs.islink(f):
1754 1694 wvfs.removedirs(f)
1755 1695
1756 1696 def setflags(self, l, x):
1757 1697 self._repo.wvfs.setflags(self._path, l, x)
1758 1698
1759 1699 class overlayworkingctx(committablectx):
1760 1700 """Wraps another mutable context with a write-back cache that can be
1761 1701 converted into a commit context.
1762 1702
1763 1703 self._cache[path] maps to a dict with keys: {
1764 1704 'exists': bool?
1765 1705 'date': date?
1766 1706 'data': str?
1767 1707 'flags': str?
1768 1708 'copied': str? (path or None)
1769 1709 }
1770 1710 If `exists` is True, `flags` and `date` must be non-None. If it
1771 1711 is `False`, the file was deleted.
1772 1712 """
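    # For illustration only (hypothetical values, not part of the class):
    # a dirty file buffered in the cache looks like
    #   {'exists': True, 'data': b'new contents\n',
    #    'date': dateutil.makedate(), 'flags': '', 'copied': None}
    # while a deletion is recorded as
    #   {'exists': False, 'data': None, 'date': None, 'flags': '',
    #    'copied': None}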
1773 1713
1774 1714 def __init__(self, repo):
1775 1715 super(overlayworkingctx, self).__init__(repo)
1776 1716 self.clean()
1777 1717
1778 1718 def setbase(self, wrappedctx):
1779 1719 self._wrappedctx = wrappedctx
1780 1720 self._parents = [wrappedctx]
1781 1721 # Drop old manifest cache as it is now out of date.
1782 1722 # This is necessary when, e.g., rebasing several nodes with one
1783 1723 # ``overlayworkingctx`` (e.g. with --collapse).
1784 1724 util.clearcachedproperty(self, '_manifest')
1785 1725
1786 1726 def data(self, path):
1787 1727 if self.isdirty(path):
1788 1728 if self._cache[path]['exists']:
1789 1729 if self._cache[path]['data']:
1790 1730 return self._cache[path]['data']
1791 1731 else:
1792 1732 # Must fallback here, too, because we only set flags.
1793 1733 return self._wrappedctx[path].data()
1794 1734 else:
1795 1735 raise error.ProgrammingError("No such file or directory: %s" %
1796 1736 path)
1797 1737 else:
1798 1738 return self._wrappedctx[path].data()
1799 1739
1800 1740 @propertycache
1801 1741 def _manifest(self):
1802 1742 parents = self.parents()
1803 1743 man = parents[0].manifest().copy()
1804 1744
1805 1745 flag = self._flagfunc
1806 1746 for path in self.added():
1807 1747 man[path] = addednodeid
1808 1748 man.setflag(path, flag(path))
1809 1749 for path in self.modified():
1810 1750 man[path] = modifiednodeid
1811 1751 man.setflag(path, flag(path))
1812 1752 for path in self.removed():
1813 1753 del man[path]
1814 1754 return man
1815 1755
1816 1756 @propertycache
1817 1757 def _flagfunc(self):
1818 1758 def f(path):
1819 1759 return self._cache[path]['flags']
1820 1760 return f
1821 1761
1822 1762 def files(self):
1823 1763 return sorted(self.added() + self.modified() + self.removed())
1824 1764
1825 1765 def modified(self):
1826 1766 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1827 1767 self._existsinparent(f)]
1828 1768
1829 1769 def added(self):
1830 1770 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1831 1771 not self._existsinparent(f)]
1832 1772
1833 1773 def removed(self):
1834 1774 return [f for f in self._cache.keys() if
1835 1775 not self._cache[f]['exists'] and self._existsinparent(f)]
1836 1776
1837 1777 def isinmemory(self):
1838 1778 return True
1839 1779
1840 1780 def filedate(self, path):
1841 1781 if self.isdirty(path):
1842 1782 return self._cache[path]['date']
1843 1783 else:
1844 1784 return self._wrappedctx[path].date()
1845 1785
1846 1786 def markcopied(self, path, origin):
1847 1787 if self.isdirty(path):
1848 1788 self._cache[path]['copied'] = origin
1849 1789 else:
1850 1790 raise error.ProgrammingError('markcopied() called on clean context')
1851 1791
1852 1792 def copydata(self, path):
1853 1793 if self.isdirty(path):
1854 1794 return self._cache[path]['copied']
1855 1795 else:
1856 1796 raise error.ProgrammingError('copydata() called on clean context')
1857 1797
1858 1798 def flags(self, path):
1859 1799 if self.isdirty(path):
1860 1800 if self._cache[path]['exists']:
1861 1801 return self._cache[path]['flags']
1862 1802 else:
1863 1803 raise error.ProgrammingError("No such file or directory: %s" %
1864 1804 self._path)
1865 1805 else:
1866 1806 return self._wrappedctx[path].flags()
1867 1807
1868 1808 def _existsinparent(self, path):
1869 1809 try:
1870 1810 # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
1871 1811 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1872 1812 # with an ``exists()`` function.
1873 1813 self._wrappedctx[path]
1874 1814 return True
1875 1815 except error.ManifestLookupError:
1876 1816 return False
1877 1817
1878 1818 def _auditconflicts(self, path):
1879 1819 """Replicates conflict checks done by wvfs.write().
1880 1820
1881 1821 Since we never write to the filesystem and never call `applyupdates` in
1882 1822 IMM, we'll never check that a path is actually writable -- e.g., because
1883 1823 it adds `a/foo`, but `a` is actually a file in the other commit.
1884 1824 """
1885 1825 def fail(path, component):
1886 1826 # p1() is the base and we're receiving "writes" for p2()'s
1887 1827 # files.
1888 1828 if 'l' in self.p1()[component].flags():
1889 1829 raise error.Abort("error: %s conflicts with symlink %s "
1890 1830 "in %s." % (path, component,
1891 1831 self.p1().rev()))
1892 1832 else:
1893 1833 raise error.Abort("error: '%s' conflicts with file '%s' in "
1894 1834 "%s." % (path, component,
1895 1835 self.p1().rev()))
1896 1836
1897 1837 # Test that each new directory to be created to write this path from p2
1898 1838 # is not a file in p1.
1899 1839 components = path.split('/')
1900 1840 for i in pycompat.xrange(len(components)):
1901 1841 component = "/".join(components[0:i])
1902 1842 if component in self.p1() and self._cache[component]['exists']:
1903 1843 fail(path, component)
1904 1844
1905 1845 # Test the other direction -- that this path from p2 isn't a directory
1906 1846 # in p1 (test that p1 doesn't have any paths matching `path/*`).
1907 1847 match = matchmod.match('/', '', [path + '/'], default=b'relpath')
1908 1848 matches = self.p1().manifest().matches(match)
1909 1849 mfiles = matches.keys()
1910 1850 if len(mfiles) > 0:
1911 1851 if len(mfiles) == 1 and mfiles[0] == path:
1912 1852 return
1913 1853 # omit the files which are deleted in current IMM wctx
1914 1854 mfiles = [m for m in mfiles if self._cache[m]['exists']]
1915 1855 if not mfiles:
1916 1856 return
1917 1857 raise error.Abort("error: file '%s' cannot be written because "
1918 1858 " '%s/' is a folder in %s (containing %d "
1919 1859 "entries: %s)"
1920 1860 % (path, path, self.p1(), len(mfiles),
1921 1861 ', '.join(mfiles)))
1922 1862
1923 1863 def write(self, path, data, flags='', **kwargs):
1924 1864 if data is None:
1925 1865 raise error.ProgrammingError("data must be non-None")
1926 1866 self._auditconflicts(path)
1927 1867 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1928 1868 flags=flags)
1929 1869
1930 1870 def setflags(self, path, l, x):
1931 1871 flag = ''
1932 1872 if l:
1933 1873 flag = 'l'
1934 1874 elif x:
1935 1875 flag = 'x'
1936 1876 self._markdirty(path, exists=True, date=dateutil.makedate(),
1937 1877 flags=flag)
1938 1878
1939 1879 def remove(self, path):
1940 1880 self._markdirty(path, exists=False)
1941 1881
1942 1882 def exists(self, path):
1943 1883 """exists behaves like `lexists`, but needs to follow symlinks and
1944 1884 return False if they are broken.
1945 1885 """
1946 1886 if self.isdirty(path):
1947 1887 # If this path exists and is a symlink, "follow" it by calling
1948 1888 # exists on the destination path.
1949 1889 if (self._cache[path]['exists'] and
1950 1890 'l' in self._cache[path]['flags']):
1951 1891 return self.exists(self._cache[path]['data'].strip())
1952 1892 else:
1953 1893 return self._cache[path]['exists']
1954 1894
1955 1895 return self._existsinparent(path)
1956 1896
1957 1897 def lexists(self, path):
1958 1898 """lexists returns True if the path exists"""
1959 1899 if self.isdirty(path):
1960 1900 return self._cache[path]['exists']
1961 1901
1962 1902 return self._existsinparent(path)
1963 1903
1964 1904 def size(self, path):
1965 1905 if self.isdirty(path):
1966 1906 if self._cache[path]['exists']:
1967 1907 return len(self._cache[path]['data'])
1968 1908 else:
1969 1909 raise error.ProgrammingError("No such file or directory: %s" %
1970 1910 self._path)
1971 1911 return self._wrappedctx[path].size()
1972 1912
1973 1913 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
1974 1914 user=None, editor=None):
1975 1915 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
1976 1916 committed.
1977 1917
1978 1918 ``text`` is the commit message.
1979 1919 ``parents`` (optional) are rev numbers.
1980 1920 """
1981 1921 # Default parents to the wrapped contexts' if not passed.
1982 1922 if parents is None:
1983 1923 parents = self._wrappedctx.parents()
1984 1924 if len(parents) == 1:
1985 1925 parents = (parents[0], None)
1986 1926
1987 1927 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
1988 1928 if parents[1] is None:
1989 1929 parents = (self._repo[parents[0]], None)
1990 1930 else:
1991 1931 parents = (self._repo[parents[0]], self._repo[parents[1]])
1992 1932
1993 1933 files = self._cache.keys()
1994 1934 def getfile(repo, memctx, path):
1995 1935 if self._cache[path]['exists']:
1996 1936 return memfilectx(repo, memctx, path,
1997 1937 self._cache[path]['data'],
1998 1938 'l' in self._cache[path]['flags'],
1999 1939 'x' in self._cache[path]['flags'],
2000 1940 self._cache[path]['copied'])
2001 1941 else:
2002 1942 # Returning None, but including the path in `files`, is
2003 1943 # necessary for memctx to register a deletion.
2004 1944 return None
2005 1945 return memctx(self._repo, parents, text, files, getfile, date=date,
2006 1946 extra=extra, user=user, branch=branch, editor=editor)
2007 1947
2008 1948 def isdirty(self, path):
2009 1949 return path in self._cache
2010 1950
2011 1951 def isempty(self):
2012 1952 # We need to discard any keys that are actually clean before the empty
2013 1953 # commit check.
2014 1954 self._compact()
2015 1955 return len(self._cache) == 0
2016 1956
2017 1957 def clean(self):
2018 1958 self._cache = {}
2019 1959
2020 1960 def _compact(self):
2021 1961 """Removes keys from the cache that are actually clean, by comparing
2022 1962 them with the underlying context.
2023 1963
2024 1964 This can occur during the merge process, e.g. by passing --tool :local
2025 1965 to resolve a conflict.
2026 1966 """
2027 1967 keys = []
2028 1968 for path in self._cache.keys():
2029 1969 cache = self._cache[path]
2030 1970 try:
2031 1971 underlying = self._wrappedctx[path]
2032 1972 if (underlying.data() == cache['data'] and
2033 1973 underlying.flags() == cache['flags']):
2034 1974 keys.append(path)
2035 1975 except error.ManifestLookupError:
2036 1976 # Path not in the underlying manifest (created).
2037 1977 continue
2038 1978
2039 1979 for path in keys:
2040 1980 del self._cache[path]
2041 1981 return keys
2042 1982
2043 1983 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2044 1984 # data not provided, let's see if we already have some; if not, let's
2045 1985 # grab it from our underlying context, so that we always have data if
2046 1986 # the file is marked as existing.
2047 1987 if exists and data is None:
2048 1988 oldentry = self._cache.get(path) or {}
2049 1989 data = oldentry.get('data') or self._wrappedctx[path].data()
2050 1990
2051 1991 self._cache[path] = {
2052 1992 'exists': exists,
2053 1993 'data': data,
2054 1994 'date': date,
2055 1995 'flags': flags,
2056 1996 'copied': None,
2057 1997 }
2058 1998
2059 1999 def filectx(self, path, filelog=None):
2060 2000 return overlayworkingfilectx(self._repo, path, parent=self,
2061 2001 filelog=filelog)
2062 2002
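# A minimal sketch (not part of this module) of the write-back flow the
# class above implements; ``repo`` is assumed to be an existing localrepo:
#
#   wctx = overlayworkingctx(repo)
#   wctx.setbase(repo['.'])
#   wctx.write('foo', b'new contents\n')        # buffered in memory only
#   node = wctx.tomemctx('example commit').commit()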
2063 2003 class overlayworkingfilectx(committablefilectx):
2064 2004 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2065 2005 cache, which can be flushed through later by calling ``flush()``."""
2066 2006
2067 2007 def __init__(self, repo, path, filelog=None, parent=None):
2068 2008 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2069 2009 parent)
2070 2010 self._repo = repo
2071 2011 self._parent = parent
2072 2012 self._path = path
2073 2013
2074 2014 def cmp(self, fctx):
2075 2015 return self.data() != fctx.data()
2076 2016
2077 2017 def changectx(self):
2078 2018 return self._parent
2079 2019
2080 2020 def data(self):
2081 2021 return self._parent.data(self._path)
2082 2022
2083 2023 def date(self):
2084 2024 return self._parent.filedate(self._path)
2085 2025
2086 2026 def exists(self):
2087 2027 return self.lexists()
2088 2028
2089 2029 def lexists(self):
2090 2030 return self._parent.exists(self._path)
2091 2031
2092 2032 def renamed(self):
2093 2033 path = self._parent.copydata(self._path)
2094 2034 if not path:
2095 2035 return None
2096 2036 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2097 2037
2098 2038 def size(self):
2099 2039 return self._parent.size(self._path)
2100 2040
2101 2041 def markcopied(self, origin):
2102 2042 self._parent.markcopied(self._path, origin)
2103 2043
2104 2044 def audit(self):
2105 2045 pass
2106 2046
2107 2047 def flags(self):
2108 2048 return self._parent.flags(self._path)
2109 2049
2110 2050 def setflags(self, islink, isexec):
2111 2051 return self._parent.setflags(self._path, islink, isexec)
2112 2052
2113 2053 def write(self, data, flags, backgroundclose=False, **kwargs):
2114 2054 return self._parent.write(self._path, data, flags, **kwargs)
2115 2055
2116 2056 def remove(self, ignoremissing=False):
2117 2057 return self._parent.remove(self._path)
2118 2058
2119 2059 def clearunknown(self):
2120 2060 pass
2121 2061
2122 2062 class workingcommitctx(workingctx):
2123 2063 """A workingcommitctx object makes access to data related to
2124 2064 the revision being committed convenient.
2125 2065
2126 2066 This hides changes in the working directory if they aren't
2127 2067 committed in this context.
2128 2068 """
2129 2069 def __init__(self, repo, changes,
2130 2070 text="", user=None, date=None, extra=None):
2131 2071 super(workingcommitctx, self).__init__(repo, text, user, date, extra,
2132 2072 changes)
2133 2073
2134 2074 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2135 2075 """Return matched files only in ``self._status``
2136 2076
2137 2077 Uncommitted files appear "clean" via this context, even if
2138 2078 they aren't actually so in the working directory.
2139 2079 """
2140 2080 if clean:
2141 2081 clean = [f for f in self._manifest if f not in self._changedset]
2142 2082 else:
2143 2083 clean = []
2144 2084 return scmutil.status([f for f in self._status.modified if match(f)],
2145 2085 [f for f in self._status.added if match(f)],
2146 2086 [f for f in self._status.removed if match(f)],
2147 2087 [], [], [], clean)
2148 2088
2149 2089 @propertycache
2150 2090 def _changedset(self):
2151 2091 """Return the set of files changed in this context
2152 2092 """
2153 2093 changed = set(self._status.modified)
2154 2094 changed.update(self._status.added)
2155 2095 changed.update(self._status.removed)
2156 2096 return changed
2157 2097
2158 2098 def makecachingfilectxfn(func):
2159 2099 """Create a filectxfn that caches based on the path.
2160 2100
2161 2101 We can't use util.cachefunc because it uses all arguments as the cache
2162 2102 key and this creates a cycle since the arguments include the repo and
2163 2103 memctx.
2164 2104 """
2165 2105 cache = {}
2166 2106
2167 2107 def getfilectx(repo, memctx, path):
2168 2108 if path not in cache:
2169 2109 cache[path] = func(repo, memctx, path)
2170 2110 return cache[path]
2171 2111
2172 2112 return getfilectx
2173 2113
2174 2114 def memfilefromctx(ctx):
2175 2115 """Given a context return a memfilectx for ctx[path]
2176 2116
2177 2117 This is a convenience method for building a memctx based on another
2178 2118 context.
2179 2119 """
2180 2120 def getfilectx(repo, memctx, path):
2181 2121 fctx = ctx[path]
2182 2122 # this is weird but apparently we only keep track of one parent
2183 2123 # (why not only store that instead of a tuple?)
2184 2124 copied = fctx.renamed()
2185 2125 if copied:
2186 2126 copied = copied[0]
2187 2127 return memfilectx(repo, memctx, path, fctx.data(),
2188 2128 islink=fctx.islink(), isexec=fctx.isexec(),
2189 2129 copied=copied)
2190 2130
2191 2131 return getfilectx
2192 2132
2193 2133 def memfilefrompatch(patchstore):
2194 2134 """Given a patch (e.g. patchstore object) return a memfilectx
2195 2135
2196 2136 This is a convenience method for building a memctx based on a patchstore.
2197 2137 """
2198 2138 def getfilectx(repo, memctx, path):
2199 2139 data, mode, copied = patchstore.getfile(path)
2200 2140 if data is None:
2201 2141 return None
2202 2142 islink, isexec = mode
2203 2143 return memfilectx(repo, memctx, path, data, islink=islink,
2204 2144 isexec=isexec, copied=copied)
2205 2145
2206 2146 return getfilectx
2207 2147
2208 2148 class memctx(committablectx):
2209 2149 """Use memctx to perform in-memory commits via localrepo.commitctx().
2210 2150
2211 2151 Revision information is supplied at initialization time while
2212 2152 related files data is made available through a callback
2213 2153 mechanism. 'repo' is the current localrepo, 'parents' is a
2214 2154 sequence of two parent revision identifiers (pass None for every
2215 2155 missing parent), 'text' is the commit message and 'files' lists
2216 2156 names of files touched by the revision (normalized and relative to
2217 2157 repository root).
2218 2158
2219 2159 filectxfn(repo, memctx, path) is a callable receiving the
2220 2160 repository, the current memctx object and the normalized path of
2221 2161 requested file, relative to repository root. It is fired by the
2222 2162 commit function for every file in 'files', but call order is
2223 2163 undefined. If the file is available in the revision being
2224 2164 committed (updated or added), filectxfn returns a memfilectx
2226 2166 object. If the file was removed, filectxfn returns None for recent
2226 2166 Mercurial. Moved files are represented by marking the source file
2227 2167 removed and the new file added with copy information (see
2228 2168 memfilectx).
2229 2169
2230 2170 user receives the committer name and defaults to current
2231 2171 repository username, date is the commit date in any format
2232 2172 supported by dateutil.parsedate() and defaults to current date, extra
2233 2173 is a dictionary of metadata or is left empty.
2234 2174 """
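    # A minimal usage sketch (``repo``, the file name and its contents are
    # assumptions, not part of this module):
    #
    #   def getfilectx(repo, memctx, path):
    #       return memfilectx(repo, memctx, path, b'content of %s\n' % path)
    #
    #   ctx = memctx(repo, (repo['.'].node(), None), 'commit message',
    #                ['foo'], getfilectx)
    #   node = ctx.commit()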
2235 2175
2236 2176 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2237 2177 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2238 2178 # this field to determine what to do in filectxfn.
2239 2179 _returnnoneformissingfiles = True
2240 2180
2241 2181 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2242 2182 date=None, extra=None, branch=None, editor=False):
2243 2183 super(memctx, self).__init__(repo, text, user, date, extra)
2244 2184 self._rev = None
2245 2185 self._node = None
2246 2186 parents = [(p or nullid) for p in parents]
2247 2187 p1, p2 = parents
2248 2188 self._parents = [self._repo[p] for p in (p1, p2)]
2249 2189 files = sorted(set(files))
2250 2190 self._files = files
2251 2191 if branch is not None:
2252 2192 self._extra['branch'] = encoding.fromlocal(branch)
2253 2193 self.substate = {}
2254 2194
2255 2195 if isinstance(filectxfn, patch.filestore):
2256 2196 filectxfn = memfilefrompatch(filectxfn)
2257 2197 elif not callable(filectxfn):
2258 2198 # if store is not callable, wrap it in a function
2259 2199 filectxfn = memfilefromctx(filectxfn)
2260 2200
2261 2201 # memoizing increases performance for e.g. vcs convert scenarios.
2262 2202 self._filectxfn = makecachingfilectxfn(filectxfn)
2263 2203
2264 2204 if editor:
2265 2205 self._text = editor(self._repo, self, [])
2266 2206 self._repo.savecommitmessage(self._text)
2267 2207
2268 2208 def filectx(self, path, filelog=None):
2269 2209 """get a file context from the working directory
2270 2210
2271 2211 Returns None if file doesn't exist and should be removed."""
2272 2212 return self._filectxfn(self._repo, self, path)
2273 2213
2274 2214 def commit(self):
2275 2215 """commit context to the repo"""
2276 2216 return self._repo.commitctx(self)
2277 2217
2278 2218 @propertycache
2279 2219 def _manifest(self):
2280 2220 """generate a manifest based on the return values of filectxfn"""
2281 2221
2282 2222 # keep this simple for now; just worry about p1
2283 2223 pctx = self._parents[0]
2284 2224 man = pctx.manifest().copy()
2285 2225
2286 2226 for f in self._status.modified:
2287 2227 man[f] = modifiednodeid
2288 2228
2289 2229 for f in self._status.added:
2290 2230 man[f] = addednodeid
2291 2231
2292 2232 for f in self._status.removed:
2293 2233 if f in man:
2294 2234 del man[f]
2295 2235
2296 2236 return man
2297 2237
2298 2238 @propertycache
2299 2239 def _status(self):
2300 2240 """Calculate exact status from ``files`` specified at construction
2301 2241 """
2302 2242 man1 = self.p1().manifest()
2303 2243 p2 = self._parents[1]
2304 2244 # "1 < len(self._parents)" can't be used for checking
2305 2245 # existence of the 2nd parent, because "memctx._parents" is
2306 2246 # explicitly initialized as a list whose length is 2.
2307 2247 if p2.node() != nullid:
2308 2248 man2 = p2.manifest()
2309 2249 managing = lambda f: f in man1 or f in man2
2310 2250 else:
2311 2251 managing = lambda f: f in man1
2312 2252
2313 2253 modified, added, removed = [], [], []
2314 2254 for f in self._files:
2315 2255 if not managing(f):
2316 2256 added.append(f)
2317 2257 elif self[f]:
2318 2258 modified.append(f)
2319 2259 else:
2320 2260 removed.append(f)
2321 2261
2322 2262 return scmutil.status(modified, added, removed, [], [], [], [])
2323 2263
2324 2264 class memfilectx(committablefilectx):
2325 2265 """memfilectx represents an in-memory file to commit.
2326 2266
2327 2267 See memctx and committablefilectx for more details.
2328 2268 """
2329 2269 def __init__(self, repo, changectx, path, data, islink=False,
2330 2270 isexec=False, copied=None):
2331 2271 """
2332 2272 path is the normalized file path relative to repository root.
2333 2273 data is the file content as a string.
2334 2274 islink is True if the file is a symbolic link.
2335 2275 isexec is True if the file is executable.
2336 2276 copied is the source file path if current file was copied in the
2337 2277 revision being committed, or None."""
2338 2278 super(memfilectx, self).__init__(repo, path, None, changectx)
2339 2279 self._data = data
2340 2280 if islink:
2341 2281 self._flags = 'l'
2342 2282 elif isexec:
2343 2283 self._flags = 'x'
2344 2284 else:
2345 2285 self._flags = ''
2346 2286 self._copied = None
2347 2287 if copied:
2348 2288 self._copied = (copied, nullid)
2349 2289
2350 2290 def data(self):
2351 2291 return self._data
2352 2292
2353 2293 def remove(self, ignoremissing=False):
2354 2294 """wraps unlink for a repo's working directory"""
2355 2295 # need to figure out what to do here
2356 2296 del self._changectx[self._path]
2357 2297
2358 2298 def write(self, data, flags, **kwargs):
2359 2299 """wraps repo.wwrite"""
2360 2300 self._data = data
2361 2301
2362 2302
2363 2303 class metadataonlyctx(committablectx):
2364 2304 """Like memctx but it's reusing the manifest of different commit.
2365 2305 Intended to be used by lightweight operations that are creating
2366 2306 metadata-only changes.
2367 2307
2368 2308 Revision information is supplied at initialization time. 'repo' is the
2369 2309 current localrepo, 'ctx' is the original revision whose manifest we're reusing,
2370 2310 'parents' is a sequence of two parent revision identifiers (pass None for
2371 2311 every missing parent), 'text' is the commit message.
2372 2312
2373 2313 user receives the committer name and defaults to current repository
2374 2314 username, date is the commit date in any format supported by
2375 2315 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2376 2316 metadata or is left empty.
2377 2317 """
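    # A minimal sketch (``repo`` and ``ctx`` are assumed) of a
    # metadata-only rewrite, e.g. rewording a commit message:
    #
    #   newctx = metadataonlyctx(repo, ctx, text='reworded message')
    #   newnode = newctx.commit()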
2378 2318 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2379 2319 date=None, extra=None, editor=False):
2380 2320 if text is None:
2381 2321 text = originalctx.description()
2382 2322 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2383 2323 self._rev = None
2384 2324 self._node = None
2385 2325 self._originalctx = originalctx
2386 2326 self._manifestnode = originalctx.manifestnode()
2387 2327 if parents is None:
2388 2328 parents = originalctx.parents()
2389 2329 else:
2390 2330 parents = [repo[p] for p in parents if p is not None]
2391 2331 parents = parents[:]
2392 2332 while len(parents) < 2:
2393 2333 parents.append(repo[nullid])
2394 2334 p1, p2 = self._parents = parents
2395 2335
2396 2336 # sanity check to ensure that the reused manifest parents are
2397 2337 # manifests of our commit parents
2398 2338 mp1, mp2 = self.manifestctx().parents
2399 2339 if p1 != nullid and p1.manifestnode() != mp1:
2400 2340 raise RuntimeError('can\'t reuse the manifest: '
2401 2341 'its p1 doesn\'t match the new ctx p1')
2402 2342 if p2 != nullid and p2.manifestnode() != mp2:
2403 2343 raise RuntimeError('can\'t reuse the manifest: '
2404 2344 'its p2 doesn\'t match the new ctx p2')
2405 2345
2406 2346 self._files = originalctx.files()
2407 2347 self.substate = {}
2408 2348
2409 2349 if editor:
2410 2350 self._text = editor(self._repo, self, [])
2411 2351 self._repo.savecommitmessage(self._text)
2412 2352
2413 2353 def manifestnode(self):
2414 2354 return self._manifestnode
2415 2355
2416 2356 @property
2417 2357 def _manifestctx(self):
2418 2358 return self._repo.manifestlog[self._manifestnode]
2419 2359
2420 2360 def filectx(self, path, filelog=None):
2421 2361 return self._originalctx.filectx(path, filelog=filelog)
2422 2362
2423 2363 def commit(self):
2424 2364 """commit context to the repo"""
2425 2365 return self._repo.commitctx(self)
2426 2366
2427 2367 @property
2428 2368 def _manifest(self):
2429 2369 return self._originalctx.manifest()
2430 2370
2431 2371 @propertycache
2432 2372 def _status(self):
2433 2373 """Calculate exact status from ``files`` specified in the ``origctx``
2434 2374 and parents manifests.
2435 2375 """
2436 2376 man1 = self.p1().manifest()
2437 2377 p2 = self._parents[1]
2438 2378 # "1 < len(self._parents)" can't be used for checking
2439 2379 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2440 2380 # explicitly initialized as a list whose length is 2.
2441 2381 if p2.node() != nullid:
2442 2382 man2 = p2.manifest()
2443 2383 managing = lambda f: f in man1 or f in man2
2444 2384 else:
2445 2385 managing = lambda f: f in man1
2446 2386
2447 2387 modified, added, removed = [], [], []
2448 2388 for f in self._files:
2449 2389 if not managing(f):
2450 2390 added.append(f)
2451 2391 elif f in self:
2452 2392 modified.append(f)
2453 2393 else:
2454 2394 removed.append(f)
2455 2395
2456 2396 return scmutil.status(modified, added, removed, [], [], [], [])
2457 2397
2458 2398 class arbitraryfilectx(object):
2459 2399 """Allows you to use filectx-like functions on a file in an arbitrary
2460 2400 location on disk, possibly not in the working directory.
2461 2401 """
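    # A small usage sketch (hypothetical paths) comparing two files that
    # live outside any working directory:
    #
    #   left = arbitraryfilectx('/tmp/a')
    #   right = arbitraryfilectx('/tmp/b')
    #   differs = left.cmp(right)   # True if the contents differ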
2462 2402 def __init__(self, path, repo=None):
2463 2403 # Repo is optional because contrib/simplemerge uses this class.
2464 2404 self._repo = repo
2465 2405 self._path = path
2466 2406
2467 2407 def cmp(self, fctx):
2468 2408 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2469 2409 # path if either side is a symlink.
2470 2410 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2471 2411 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2472 2412 # Add a fast-path for merge if both sides are disk-backed.
2473 2413 # Note that filecmp uses the opposite return values (True if same)
2474 2414 # from our cmp functions (True if different).
2475 2415 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2476 2416 return self.data() != fctx.data()
2477 2417
2478 2418 def path(self):
2479 2419 return self._path
2480 2420
2481 2421 def flags(self):
2482 2422 return ''
2483 2423
2484 2424 def data(self):
2485 2425 return util.readfile(self._path)
2486 2426
2487 2427 def decodeddata(self):
2488 2428 with open(self._path, "rb") as f:
2489 2429 return f.read()
2490 2430
2491 2431 def remove(self):
2492 2432 util.unlink(self._path)
2493 2433
2494 2434 def write(self, data, flags, **kwargs):
2495 2435 assert not flags
2496 2436 with open(self._path, "w") as f:
2497 2437 f.write(data)
@@ -1,2941 +1,3001 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 bin,
20 21 hex,
21 22 nullid,
23 nullrev,
22 24 short,
23 25 )
24 26 from . import (
25 27 bookmarks,
26 28 branchmap,
27 29 bundle2,
28 30 changegroup,
29 31 changelog,
30 32 color,
31 33 context,
32 34 dirstate,
33 35 dirstateguard,
34 36 discovery,
35 37 encoding,
36 38 error,
37 39 exchange,
38 40 extensions,
39 41 filelog,
40 42 hook,
41 43 lock as lockmod,
42 44 manifest,
43 45 match as matchmod,
44 46 merge as mergemod,
45 47 mergeutil,
46 48 namespaces,
47 49 narrowspec,
48 50 obsolete,
49 51 pathutil,
50 52 phases,
51 53 pushkey,
52 54 pycompat,
53 55 repository,
54 56 repoview,
55 57 revset,
56 58 revsetlang,
57 59 scmutil,
58 60 sparse,
59 61 store as storemod,
60 62 subrepoutil,
61 63 tags as tagsmod,
62 64 transaction,
63 65 txnutil,
64 66 util,
65 67 vfs as vfsmod,
66 68 )
67 69 from .utils import (
68 70 interfaceutil,
69 71 procutil,
70 72 stringutil,
71 73 )
72 74
73 75 from .revlogutils import (
74 76 constants as revlogconst,
75 77 )
76 78
77 79 release = lockmod.release
78 80 urlerr = util.urlerr
79 81 urlreq = util.urlreq
80 82
81 83 # set of (path, vfs-location) tuples. vfs-location is:
82 84 # - 'plain' for vfs relative paths
83 85 # - '' for svfs relative paths
84 86 _cachedfiles = set()
85 87
86 88 class _basefilecache(scmutil.filecache):
87 89 """All filecache usage on repo are done for logic that should be unfiltered
88 90 """
89 91 def __get__(self, repo, type=None):
90 92 if repo is None:
91 93 return self
92 94 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
93 95 def __set__(self, repo, value):
94 96 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
95 97 def __delete__(self, repo):
96 98 return super(_basefilecache, self).__delete__(repo.unfiltered())
97 99
98 100 class repofilecache(_basefilecache):
99 101 """filecache for files in .hg but outside of .hg/store"""
100 102 def __init__(self, *paths):
101 103 super(repofilecache, self).__init__(*paths)
102 104 for path in paths:
103 105 _cachedfiles.add((path, 'plain'))
104 106
105 107 def join(self, obj, fname):
106 108 return obj.vfs.join(fname)
107 109
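# A typical use (sketch; the decorated property is illustrative) caches a
# value parsed from a .hg/ file and invalidates it when that file changes:
#
#   @repofilecache('bookmarks')
#   def _bookmarks(self):
#       return bookmarks.bmstore(self)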
108 110 class storecache(_basefilecache):
109 111 """filecache for files in the store"""
110 112 def __init__(self, *paths):
111 113 super(storecache, self).__init__(*paths)
112 114 for path in paths:
113 115 _cachedfiles.add((path, ''))
114 116
115 117 def join(self, obj, fname):
116 118 return obj.sjoin(fname)
117 119
118 120 def isfilecached(repo, name):
119 121 """check if a repo has already cached "name" filecache-ed property
120 122
121 123 This returns (cachedobj-or-None, iscached) tuple.
122 124 """
123 125 cacheentry = repo.unfiltered()._filecache.get(name, None)
124 126 if not cacheentry:
125 127 return None, False
126 128 return cacheentry.obj, True
127 129
128 130 class unfilteredpropertycache(util.propertycache):
129 131 """propertycache that apply to unfiltered repo only"""
130 132
131 133 def __get__(self, repo, type=None):
132 134 unfi = repo.unfiltered()
133 135 if unfi is repo:
134 136 return super(unfilteredpropertycache, self).__get__(unfi)
135 137 return getattr(unfi, self.name)
136 138
137 139 class filteredpropertycache(util.propertycache):
138 140 """propertycache that must take filtering in account"""
139 141
140 142 def cachevalue(self, obj, value):
141 143 object.__setattr__(obj, self.name, value)
142 144
143 145
144 146 def hasunfilteredcache(repo, name):
145 147 """check if a repo has an unfilteredpropertycache value for <name>"""
146 148 return name in vars(repo.unfiltered())
147 149
148 150 def unfilteredmethod(orig):
149 151 """decorate method that always need to be run on unfiltered version"""
150 152 def wrapper(repo, *args, **kwargs):
151 153 return orig(repo.unfiltered(), *args, **kwargs)
152 154 return wrapper
153 155
154 156 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
155 157 'unbundle'}
156 158 legacycaps = moderncaps.union({'changegroupsubset'})
157 159
158 160 @interfaceutil.implementer(repository.ipeercommandexecutor)
159 161 class localcommandexecutor(object):
160 162 def __init__(self, peer):
161 163 self._peer = peer
162 164 self._sent = False
163 165 self._closed = False
164 166
165 167 def __enter__(self):
166 168 return self
167 169
168 170 def __exit__(self, exctype, excvalue, exctb):
169 171 self.close()
170 172
171 173 def callcommand(self, command, args):
172 174 if self._sent:
173 175 raise error.ProgrammingError('callcommand() cannot be used after '
174 176 'sendcommands()')
175 177
176 178 if self._closed:
177 179 raise error.ProgrammingError('callcommand() cannot be used after '
178 180 'close()')
179 181
180 182 # We don't need to support anything fancy. Just call the named
181 183 # method on the peer and return a resolved future.
182 184 fn = getattr(self._peer, pycompat.sysstr(command))
183 185
184 186 f = pycompat.futures.Future()
185 187
186 188 try:
187 189 result = fn(**pycompat.strkwargs(args))
188 190 except Exception:
189 191 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
190 192 else:
191 193 f.set_result(result)
192 194
193 195 return f
194 196
195 197 def sendcommands(self):
196 198 self._sent = True
197 199
198 200 def close(self):
199 201 self._closed = True
200 202
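# A minimal usage sketch (assuming an existing ``peer``); for local peers
# the future resolves synchronously, but the executor API is uniform:
#
#   with peer.commandexecutor() as e:
#       f = e.callcommand(b'heads', {})
#       e.sendcommands()
#       heads = f.result()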
201 203 @interfaceutil.implementer(repository.ipeercommands)
202 204 class localpeer(repository.peer):
203 205 '''peer for a local repo; reflects only the most recent API'''
204 206
205 207 def __init__(self, repo, caps=None):
206 208 super(localpeer, self).__init__()
207 209
208 210 if caps is None:
209 211 caps = moderncaps.copy()
210 212 self._repo = repo.filtered('served')
211 213 self.ui = repo.ui
212 214 self._caps = repo._restrictcapabilities(caps)
213 215
214 216 # Begin of _basepeer interface.
215 217
216 218 def url(self):
217 219 return self._repo.url()
218 220
219 221 def local(self):
220 222 return self._repo
221 223
222 224 def peer(self):
223 225 return self
224 226
225 227 def canpush(self):
226 228 return True
227 229
228 230 def close(self):
229 231 self._repo.close()
230 232
231 233 # End of _basepeer interface.
232 234
233 235 # Begin of _basewirecommands interface.
234 236
235 237 def branchmap(self):
236 238 return self._repo.branchmap()
237 239
238 240 def capabilities(self):
239 241 return self._caps
240 242
241 243 def clonebundles(self):
242 244 return self._repo.tryread('clonebundles.manifest')
243 245
244 246 def debugwireargs(self, one, two, three=None, four=None, five=None):
245 247 """Used to test argument passing over the wire"""
246 248 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
247 249 pycompat.bytestr(four),
248 250 pycompat.bytestr(five))
249 251
250 252 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
251 253 **kwargs):
252 254 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
253 255 common=common, bundlecaps=bundlecaps,
254 256 **kwargs)[1]
255 257 cb = util.chunkbuffer(chunks)
256 258
257 259 if exchange.bundle2requested(bundlecaps):
258 260 # When requesting a bundle2, getbundle returns a stream to make the
259 261 # wire level function happier. We need to build a proper object
260 262 # from it in local peer.
261 263 return bundle2.getunbundler(self.ui, cb)
262 264 else:
263 265 return changegroup.getunbundler('01', cb, None)
264 266
265 267 def heads(self):
266 268 return self._repo.heads()
267 269
268 270 def known(self, nodes):
269 271 return self._repo.known(nodes)
270 272
271 273 def listkeys(self, namespace):
272 274 return self._repo.listkeys(namespace)
273 275
274 276 def lookup(self, key):
275 277 return self._repo.lookup(key)
276 278
277 279 def pushkey(self, namespace, key, old, new):
278 280 return self._repo.pushkey(namespace, key, old, new)
279 281
280 282 def stream_out(self):
281 283 raise error.Abort(_('cannot perform stream clone against local '
282 284 'peer'))
283 285
284 286 def unbundle(self, bundle, heads, url):
285 287 """apply a bundle on a repo
286 288
287 289 This function handles the repo locking itself."""
288 290 try:
289 291 try:
290 292 bundle = exchange.readbundle(self.ui, bundle, None)
291 293 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
292 294 if util.safehasattr(ret, 'getchunks'):
293 295 # This is a bundle20 object, turn it into an unbundler.
294 296 # This little dance should be dropped eventually when the
295 297 # API is finally improved.
296 298 stream = util.chunkbuffer(ret.getchunks())
297 299 ret = bundle2.getunbundler(self.ui, stream)
298 300 return ret
299 301 except Exception as exc:
300 302 # If the exception contains output salvaged from a bundle2
301 303 # reply, we need to make sure it is printed before continuing
302 304 # to fail. So we build a bundle2 with such output and consume
303 305 # it directly.
304 306 #
305 307 # This is not very elegant but allows a "simple" solution for
306 308 # issue4594
307 309 output = getattr(exc, '_bundle2salvagedoutput', ())
308 310 if output:
309 311 bundler = bundle2.bundle20(self._repo.ui)
310 312 for out in output:
311 313 bundler.addpart(out)
312 314 stream = util.chunkbuffer(bundler.getchunks())
313 315 b = bundle2.getunbundler(self.ui, stream)
314 316 bundle2.processbundle(self._repo, b)
315 317 raise
316 318 except error.PushRaced as exc:
317 319 raise error.ResponseError(_('push failed:'),
318 320 stringutil.forcebytestr(exc))
319 321
320 322 # End of _basewirecommands interface.
321 323
322 324 # Begin of peer interface.
323 325
324 326 def commandexecutor(self):
325 327 return localcommandexecutor(self)
326 328
327 329 # End of peer interface.
328 330
329 331 @interfaceutil.implementer(repository.ipeerlegacycommands)
330 332 class locallegacypeer(localpeer):
331 333 '''peer extension which implements legacy methods too; used for tests with
332 334 restricted capabilities'''
333 335
334 336 def __init__(self, repo):
335 337 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
336 338
337 339 # Begin of baselegacywirecommands interface.
338 340
339 341 def between(self, pairs):
340 342 return self._repo.between(pairs)
341 343
342 344 def branches(self, nodes):
343 345 return self._repo.branches(nodes)
344 346
345 347 def changegroup(self, nodes, source):
346 348 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
347 349 missingheads=self._repo.heads())
348 350 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
349 351
350 352 def changegroupsubset(self, bases, heads, source):
351 353 outgoing = discovery.outgoing(self._repo, missingroots=bases,
352 354 missingheads=heads)
353 355 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
354 356
355 357 # End of baselegacywirecommands interface.
356 358
357 359 # Increment the sub-version when the revlog v2 format changes to lock out old
358 360 # clients.
359 361 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
360 362
361 363 # A repository with the sparserevlog feature will have delta chains that
362 364 # can spread over a larger span. Sparse reading cuts these large spans into
363 365 # pieces, so that each piece isn't too big.
364 366 # Without the sparserevlog capability, reading from the repository could use
365 367 # huge amounts of memory, because the whole span would be read at once,
366 368 # including all the intermediate revisions that aren't pertinent for the chain.
367 369 # This is why once a repository has enabled sparse-read, it becomes required.
368 370 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
369 371
370 372 # Functions receiving (ui, features) that extensions can register to impact
371 373 # the ability to load repositories with custom requirements. Only
372 374 # functions defined in loaded extensions are called.
373 375 #
374 376 # The function receives a set of requirement strings that the repository
375 377 # is capable of opening. Functions will typically add elements to the
376 378 # set to reflect that the extension knows how to handle those requirements.
377 379 featuresetupfuncs = set()
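# A sketch (hypothetical extension code) of registering such a function:
#
#   def featuresetup(ui, supported):
#       supported.add(b'myext-requirement')
#
#   def uisetup(ui):
#       localrepo.featuresetupfuncs.add(featuresetup)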
378 380
379 381 def makelocalrepository(baseui, path, intents=None):
380 382 """Create a local repository object.
381 383
382 384 Given arguments needed to construct a local repository, this function
383 385 performs various early repository loading functionality (such as
384 386 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
385 387 the repository can be opened, derives a type suitable for representing
386 388 that repository, and returns an instance of it.
387 389
388 390 The returned object conforms to the ``repository.completelocalrepository``
389 391 interface.
390 392
391 393 The repository type is derived by calling a series of factory functions
392 394 for each aspect/interface of the final repository. These are defined by
393 395 ``REPO_INTERFACES``.
394 396
395 397 Each factory function is called to produce a type implementing a specific
396 398 interface. The cumulative list of returned types will be combined into a
397 399 new type and that type will be instantiated to represent the local
398 400 repository.
399 401
400 402 The factory functions each receive various state that may be consulted
401 403 as part of deriving a type.
402 404
403 405 Extensions should wrap these factory functions to customize repository type
404 406 creation. Note that an extension's wrapped function may be called even if
405 407 that extension is not loaded for the repo being constructed. Extensions
406 408 should check if their ``__name__`` appears in the
407 409 ``extensionmodulenames`` set passed to the factory function and no-op if
408 410 not.
409 411 """
410 412 ui = baseui.copy()
411 413 # Prevent copying repo configuration.
412 414 ui.copy = baseui.copy
413 415
414 416 # Working directory VFS rooted at repository root.
415 417 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
416 418
417 419 # Main VFS for .hg/ directory.
418 420 hgpath = wdirvfs.join(b'.hg')
419 421 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
420 422
421 423 # The .hg/ path should exist and should be a directory. All other
422 424 # cases are errors.
423 425 if not hgvfs.isdir():
424 426 try:
425 427 hgvfs.stat()
426 428 except OSError as e:
427 429 if e.errno != errno.ENOENT:
428 430 raise
429 431
430 432 raise error.RepoError(_(b'repository %s not found') % path)
431 433
432 434 # .hg/requires file contains a newline-delimited list of
433 435 # features/capabilities the opener (us) must have in order to use
434 436 # the repository. This file was introduced in Mercurial 0.9.2,
435 437 # which means very old repositories may not have one. We assume
436 438 # a missing file translates to no requirements.
437 439 try:
438 440 requirements = set(hgvfs.read(b'requires').splitlines())
439 441 except IOError as e:
440 442 if e.errno != errno.ENOENT:
441 443 raise
442 444 requirements = set()
443 445
444 446 # The .hg/hgrc file may load extensions or contain config options
445 447 # that influence repository construction. Attempt to load it and
446 448 # process any new extensions that it may have pulled in.
447 449 try:
448 450 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
449 451 # Run this before extensions.loadall() so extensions can be
450 452 # automatically enabled.
451 453 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
452 454 except IOError:
453 455 pass
454 456 else:
455 457 extensions.loadall(ui)
456 458
457 459 # Set of module names of extensions loaded for this repository.
458 460 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
459 461
460 462 supportedrequirements = gathersupportedrequirements(ui)
461 463
462 464 # We first validate the requirements are known.
463 465 ensurerequirementsrecognized(requirements, supportedrequirements)
464 466
465 467 # Then we validate that the known set is reasonable to use together.
466 468 ensurerequirementscompatible(ui, requirements)
467 469
468 470 # TODO there are unhandled edge cases related to opening repositories with
469 471 # shared storage. If storage is shared, we should also test for requirements
470 472 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
471 473 # that repo, as that repo may load extensions needed to open it. This is a
472 474 # bit complicated because we don't want the other hgrc to overwrite settings
473 475 # in this hgrc.
474 476 #
475 477 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
476 478 # file when sharing repos. But if a requirement is added after the share is
477 479 # performed, thereby introducing a new requirement for the opener, we may
478 480 # not see that and could encounter a run-time error interacting with
479 481 # that shared store since it has an unknown-to-us requirement.
480 482
481 483 # At this point, we know we should be capable of opening the repository.
482 484 # Now get on with doing that.
483 485
484 486 features = set()
485 487
486 488 # The "store" part of the repository holds versioned data. How it is
487 489 # accessed is determined by various requirements. The ``shared`` or
488 490 # ``relshared`` requirements indicate the store lives in the path contained
489 491 # in the ``.hg/sharedpath`` file. This is an absolute path for
490 492 # ``shared`` and relative to ``.hg/`` for ``relshared``.
491 493 if b'shared' in requirements or b'relshared' in requirements:
492 494 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
493 495 if b'relshared' in requirements:
494 496 sharedpath = hgvfs.join(sharedpath)
495 497
496 498 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
497 499
498 500 if not sharedvfs.exists():
499 501 raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
500 502 b'directory %s') % sharedvfs.base)
501 503
502 504 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
503 505
504 506 storebasepath = sharedvfs.base
505 507 cachepath = sharedvfs.join(b'cache')
506 508 else:
507 509 storebasepath = hgvfs.base
508 510 cachepath = hgvfs.join(b'cache')
509 511
510 512 # The store has changed over time and the exact layout is dictated by
511 513 # requirements. The store interface abstracts differences across all
512 514 # of them.
513 515 store = makestore(requirements, storebasepath,
514 516 lambda base: vfsmod.vfs(base, cacheaudited=True))
515 517 hgvfs.createmode = store.createmode
516 518
517 519 storevfs = store.vfs
518 520 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
519 521
520 522 # The cache vfs is used to manage cache files.
521 523 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
522 524 cachevfs.createmode = store.createmode
523 525
524 526 # Now resolve the type for the repository object. We do this by repeatedly
525 527 # calling a factory function to produce types for specific aspects of the
526 528 # repo's operation. The aggregate returned types are used as base classes
527 529 # for a dynamically-derived type, which will represent our new repository.
528 530
529 531 bases = []
530 532 extrastate = {}
531 533
532 534 for iface, fn in REPO_INTERFACES:
533 535 # We pass all potentially useful state to give extensions tons of
534 536 # flexibility.
535 537 typ = fn(ui=ui,
536 538 intents=intents,
537 539 requirements=requirements,
538 540 features=features,
539 541 wdirvfs=wdirvfs,
540 542 hgvfs=hgvfs,
541 543 store=store,
542 544 storevfs=storevfs,
543 545 storeoptions=storevfs.options,
544 546 cachevfs=cachevfs,
545 547 extensionmodulenames=extensionmodulenames,
546 548 extrastate=extrastate,
547 549 baseclasses=bases)
548 550
549 551 if not isinstance(typ, type):
550 552 raise error.ProgrammingError('unable to construct type for %s' %
551 553 iface)
552 554
553 555 bases.append(typ)
554 556
555 557 # type() allows you to use characters in type names that wouldn't be
556 558 # recognized as Python symbols in source code. We abuse that to add
557 559 # rich information about our constructed repo.
558 560 name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
559 561 wdirvfs.base,
560 562 b','.join(sorted(requirements))))
561 563
562 564 cls = type(name, tuple(bases), {})
563 565
564 566 return cls(
565 567 baseui=baseui,
566 568 ui=ui,
567 569 origroot=path,
568 570 wdirvfs=wdirvfs,
569 571 hgvfs=hgvfs,
570 572 requirements=requirements,
571 573 supportedrequirements=supportedrequirements,
572 574 sharedpath=storebasepath,
573 575 store=store,
574 576 cachevfs=cachevfs,
575 577 features=features,
576 578 intents=intents)
577 579
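# A sketch (hypothetical extension code; the wrapped factory and mixin are
# assumptions) of customizing the derived repository type as described in
# ``makelocalrepository``'s docstring:
#
#   def wrapfactory(orig, **kwargs):
#       typ = orig(**kwargs)
#       if __name__ not in kwargs['extensionmodulenames']:
#           return typ                # not loaded for this repo: no-op
#       return type('myrepotype', (MyMixin, typ), {})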
578 580 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
579 581 """Perform additional actions after .hg/hgrc is loaded.
580 582
581 583 This function is called during repository loading immediately after
582 584 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
583 585
584 586 The function can be used to validate configs, automatically add
585 587 options (including extensions) based on requirements, etc.
586 588 """
587 589
588 590 # Map of requirements to list of extensions to load automatically when
589 591 # requirement is present.
590 592 autoextensions = {
591 593 b'largefiles': [b'largefiles'],
592 594 b'lfs': [b'lfs'],
593 595 }
594 596
595 597 for requirement, names in sorted(autoextensions.items()):
596 598 if requirement not in requirements:
597 599 continue
598 600
599 601 for name in names:
600 602 if not ui.hasconfig(b'extensions', name):
601 603 ui.setconfig(b'extensions', name, b'', source='autoload')
602 604
603 605 def gathersupportedrequirements(ui):
604 606 """Determine the complete set of recognized requirements."""
605 607 # Start with all requirements supported by this file.
606 608 supported = set(localrepository._basesupported)
607 609
608 610 # Execute ``featuresetupfuncs`` entries if they belong to an extension
609 611 # relevant to this ui instance.
610 612 modules = {m.__name__ for n, m in extensions.extensions(ui)}
611 613
612 614 for fn in featuresetupfuncs:
613 615 if fn.__module__ in modules:
614 616 fn(ui, supported)
615 617
616 618 # Add derived requirements from registered compression engines.
617 619 for name in util.compengines:
618 620 engine = util.compengines[name]
619 621 if engine.revlogheader():
620 622 supported.add(b'exp-compression-%s' % name)
621 623
622 624 return supported
623 625
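# Illustrative sketch (editor's addition): an extension advertises support
# for a custom requirement by registering a callback in
# ``featuresetupfuncs``; the ``__module__`` check above ensures it only runs
# when that extension is enabled. The requirement name is hypothetical.
def examplefeaturesetup(ui, supported):
    supported.add(b'exp-example-requirement')
# An extension would register it from its own module, e.g.:
#   localrepo.featuresetupfuncs.add(examplefeaturesetup)
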
624 626 def ensurerequirementsrecognized(requirements, supported):
625 627 """Validate that a set of local requirements is recognized.
626 628
627 629 Receives a set of requirements. Raises an ``error.RepoError`` if there
628 630 exists any requirement in that set that currently loaded code doesn't
629 631 recognize.
630 632
631 633 Returns nothing on success.
632 634 """
633 635 missing = set()
634 636
635 637 for requirement in requirements:
636 638 if requirement in supported:
637 639 continue
638 640
639 641 if not requirement or not requirement[0:1].isalnum():
640 642 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
641 643
642 644 missing.add(requirement)
643 645
644 646 if missing:
645 647 raise error.RequirementError(
646 648 _(b'repository requires features unknown to this Mercurial: %s') %
647 649 b' '.join(sorted(missing)),
648 650 hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
649 651 b'for more information'))
650 652
651 653 def ensurerequirementscompatible(ui, requirements):
652 654 """Validates that a set of recognized requirements is mutually compatible.
653 655
654 656 Some requirements may not be compatible with others or require
655 657 config options that aren't enabled. This function is called during
656 658 repository opening to ensure that the set of requirements needed
657 659 to open a repository is sane and compatible with config options.
658 660
659 661 Extensions can monkeypatch this function to perform additional
660 662 checking.
661 663
662 664 ``error.RepoError`` should be raised on failure.
663 665 """
664 666 if b'exp-sparse' in requirements and not sparse.enabled:
665 667 raise error.RepoError(_(b'repository is using sparse feature but '
666 668 b'sparse is not enabled; enable the '
667 669 b'"sparse" extension to access'))
668 670
669 671 def makestore(requirements, path, vfstype):
670 672 """Construct a storage object for a repository."""
671 673 if b'store' in requirements:
672 674 if b'fncache' in requirements:
673 675 return storemod.fncachestore(path, vfstype,
674 676 b'dotencode' in requirements)
675 677
676 678 return storemod.encodedstore(path, vfstype)
677 679
678 680 return storemod.basicstore(path, vfstype)
679 681
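# Summary (editor's addition) of the store selection implemented above:
#
#   {'store', 'fncache', 'dotencode', ...} -> fncachestore with dotencode=True
#   {'store', 'fncache', ...}              -> fncachestore with dotencode=False
#   {'store', ...}                         -> encodedstore
#   no 'store' requirement                 -> basicstore
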
680 682 def resolvestorevfsoptions(ui, requirements, features):
681 683 """Resolve the options to pass to the store vfs opener.
682 684
683 685 The returned dict is used to influence behavior of the storage layer.
684 686 """
685 687 options = {}
686 688
687 689 if b'treemanifest' in requirements:
688 690 options[b'treemanifest'] = True
689 691
690 692 # experimental config: format.manifestcachesize
691 693 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
692 694 if manifestcachesize is not None:
693 695 options[b'manifestcachesize'] = manifestcachesize
694 696
695 697 # In the absence of another requirement superseding a revlog-related
696 698 # requirement, we have to assume the repo is using revlog version 0.
697 699 # This revlog format is super old and we don't bother trying to parse
698 700 # opener options for it because those options wouldn't do anything
699 701 # meaningful on such old repos.
700 702 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
701 703 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
702 704
703 705 return options
704 706
705 707 def resolverevlogstorevfsoptions(ui, requirements, features):
706 708 """Resolve opener options specific to revlogs."""
707 709
708 710 options = {}
709 711
710 712 if b'revlogv1' in requirements:
711 713 options[b'revlogv1'] = True
712 714 if REVLOGV2_REQUIREMENT in requirements:
713 715 options[b'revlogv2'] = True
714 716
715 717 if b'generaldelta' in requirements:
716 718 options[b'generaldelta'] = True
717 719
718 720 # experimental config: format.chunkcachesize
719 721 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
720 722 if chunkcachesize is not None:
721 723 options[b'chunkcachesize'] = chunkcachesize
722 724
723 725 deltabothparents = ui.configbool(b'storage',
724 726 b'revlog.optimize-delta-parent-choice')
725 727 options[b'deltabothparents'] = deltabothparents
726 728
727 729 options[b'lazydeltabase'] = not scmutil.gddeltaconfig(ui)
728 730
729 731 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
730 732 if 0 <= chainspan:
731 733 options[b'maxdeltachainspan'] = chainspan
732 734
733 735 mmapindexthreshold = ui.configbytes(b'experimental',
734 736 b'mmapindexthreshold')
735 737 if mmapindexthreshold is not None:
736 738 options[b'mmapindexthreshold'] = mmapindexthreshold
737 739
738 740 withsparseread = ui.configbool(b'experimental', b'sparse-read')
739 741 srdensitythres = float(ui.config(b'experimental',
740 742 b'sparse-read.density-threshold'))
741 743 srmingapsize = ui.configbytes(b'experimental',
742 744 b'sparse-read.min-gap-size')
743 745 options[b'with-sparse-read'] = withsparseread
744 746 options[b'sparse-read-density-threshold'] = srdensitythres
745 747 options[b'sparse-read-min-gap-size'] = srmingapsize
746 748
747 749 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
748 750 options[b'sparse-revlog'] = sparserevlog
749 751 if sparserevlog:
750 752 options[b'generaldelta'] = True
751 753
752 754 maxchainlen = None
753 755 if sparserevlog:
754 756 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
755 757 # experimental config: format.maxchainlen
756 758 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
757 759 if maxchainlen is not None:
758 760 options[b'maxchainlen'] = maxchainlen
759 761
760 762 for r in requirements:
761 763 if r.startswith(b'exp-compression-'):
762 764 options[b'compengine'] = r[len(b'exp-compression-'):]
763 765
764 766 if repository.NARROW_REQUIREMENT in requirements:
765 767 options[b'enableellipsis'] = True
766 768
767 769 return options
768 770
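# Illustrative hgrc snippet (editor's addition) exercising the knobs consulted
# above; the values are examples, not recommendations:
#
#   [format]
#   chunkcachesize = 65536
#   maxchainlen = 1000
#   [storage]
#   revlog.optimize-delta-parent-choice = True
#   [experimental]
#   maxdeltachainspan = 4194304
#   sparse-read = True
#   sparse-read.min-gap-size = 65536
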
769 771 def makemain(**kwargs):
770 772 """Produce a type conforming to ``ilocalrepositorymain``."""
771 773 return localrepository
772 774
773 775 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
774 776 class revlogfilestorage(object):
775 777 """File storage when using revlogs."""
776 778
777 779 def file(self, path):
778 780 if path[0] == b'/':
779 781 path = path[1:]
780 782
781 783 return filelog.filelog(self.svfs, path)
782 784
783 785 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
784 786 class revlognarrowfilestorage(object):
785 787 """File storage when using revlogs and narrow files."""
786 788
787 789 def file(self, path):
788 790 if path[0] == b'/':
789 791 path = path[1:]
790 792
791 793 return filelog.narrowfilelog(self.svfs, path, self.narrowmatch())
792 794
793 795 def makefilestorage(requirements, features, **kwargs):
794 796 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
795 797 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
796 798
797 799 if repository.NARROW_REQUIREMENT in requirements:
798 800 return revlognarrowfilestorage
799 801 else:
800 802 return revlogfilestorage
801 803
802 804 # List of repository interfaces and factory functions for them. Each
803 805 # will be called in order during ``makelocalrepository()`` to iteratively
804 806 # derive the final type for a local repository instance.
805 807 REPO_INTERFACES = [
806 808 (repository.ilocalrepositorymain, makemain),
807 809 (repository.ilocalrepositoryfilestorage, makefilestorage),
808 810 ]
809 811
810 812 @interfaceutil.implementer(repository.ilocalrepositorymain)
811 813 class localrepository(object):
812 814 """Main class for representing local repositories.
813 815
814 816 All local repositories are instances of this class.
815 817
816 818 Constructed on its own, instances of this class are not usable as
817 819 repository objects. To obtain a usable repository object, call
818 820 ``hg.repository()``, ``localrepo.instance()``, or
819 821 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
820 822 ``instance()`` adds support for creating new repositories.
821 823 ``hg.repository()`` adds more extension integration, including calling
822 824 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
823 825 used.
824 826 """
825 827
826 828 # obsolete experimental requirements:
827 829 # - manifestv2: An experimental new manifest format that allowed
828 830 # for stem compression of long paths. Experiment ended up not
829 831 # being successful (repository sizes went up due to worse delta
830 832 # chains), and the code was deleted in 4.6.
831 833 supportedformats = {
832 834 'revlogv1',
833 835 'generaldelta',
834 836 'treemanifest',
835 837 REVLOGV2_REQUIREMENT,
836 838 SPARSEREVLOG_REQUIREMENT,
837 839 }
838 840 _basesupported = supportedformats | {
839 841 'store',
840 842 'fncache',
841 843 'shared',
842 844 'relshared',
843 845 'dotencode',
844 846 'exp-sparse',
845 847 'internal-phase'
846 848 }
847 849
848 850 # list of prefixes for files which can be written without 'wlock'
849 851 # Extensions should extend this list when needed
850 852 _wlockfreeprefix = {
851 853 # We might consider requiring 'wlock' for the next
852 854 # two, but pretty much all the existing code assumes
853 855 # wlock is not needed so we keep them excluded for
854 856 # now.
855 857 'hgrc',
856 858 'requires',
857 859 # XXX cache is a complicated business; someone
858 860 # should investigate this in depth at some point
859 861 'cache/',
860 862 # XXX shouldn't the dirstate be covered by the wlock?
861 863 'dirstate',
862 864 # XXX bisect was still a bit too messy at the time
863 865 # this changeset was introduced. Someone should fix
864 866 # the remaining bit and drop this line
865 867 'bisect.state',
866 868 }
867 869
868 870 def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
869 871 supportedrequirements, sharedpath, store, cachevfs,
870 872 features, intents=None):
871 873 """Create a new local repository instance.
872 874
873 875 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
874 876 or ``localrepo.makelocalrepository()`` for obtaining a new repository
875 877 object.
876 878
877 879 Arguments:
878 880
879 881 baseui
880 882 ``ui.ui`` instance that ``ui`` argument was based off of.
881 883
882 884 ui
883 885 ``ui.ui`` instance for use by the repository.
884 886
885 887 origroot
886 888 ``bytes`` path to working directory root of this repository.
887 889
888 890 wdirvfs
889 891 ``vfs.vfs`` rooted at the working directory.
890 892
891 893 hgvfs
892 894 ``vfs.vfs`` rooted at .hg/
893 895
894 896 requirements
895 897 ``set`` of bytestrings representing repository opening requirements.
896 898
897 899 supportedrequirements
898 900 ``set`` of bytestrings representing repository requirements that we
899 901 know how to open. May be a superset of ``requirements``.
900 902
901 903 sharedpath
902 904 ``bytes`` Defining path to storage base directory. Points to a
903 905 ``.hg/`` directory somewhere.
904 906
905 907 store
906 908 ``store.basicstore`` (or derived) instance providing access to
907 909 versioned storage.
908 910
909 911 cachevfs
910 912 ``vfs.vfs`` used for cache files.
911 913
912 914 features
913 915 ``set`` of bytestrings defining features/capabilities of this
914 916 instance.
915 917
916 918 intents
917 919 ``set`` of system strings indicating what this repo will be used
918 920 for.
919 921 """
920 922 self.baseui = baseui
921 923 self.ui = ui
922 924 self.origroot = origroot
923 925 # vfs rooted at working directory.
924 926 self.wvfs = wdirvfs
925 927 self.root = wdirvfs.base
926 928 # vfs rooted at .hg/. Used to access most non-store paths.
927 929 self.vfs = hgvfs
928 930 self.path = hgvfs.base
929 931 self.requirements = requirements
930 932 self.supported = supportedrequirements
931 933 self.sharedpath = sharedpath
932 934 self.store = store
933 935 self.cachevfs = cachevfs
934 936 self.features = features
935 937
936 938 self.filtername = None
937 939
938 940 if (self.ui.configbool('devel', 'all-warnings') or
939 941 self.ui.configbool('devel', 'check-locks')):
940 942 self.vfs.audit = self._getvfsward(self.vfs.audit)
941 943 # A list of callbacks to shape the phases if no data were found.
942 944 # Callbacks are in the form: func(repo, roots) --> processed root.
943 945 # This list is to be filled by extensions during repo setup
944 946 self._phasedefaults = []
945 947
946 948 color.setup(self.ui)
947 949
948 950 self.spath = self.store.path
949 951 self.svfs = self.store.vfs
950 952 self.sjoin = self.store.join
951 953 if (self.ui.configbool('devel', 'all-warnings') or
952 954 self.ui.configbool('devel', 'check-locks')):
953 955 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
954 956 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
955 957 else: # standard vfs
956 958 self.svfs.audit = self._getsvfsward(self.svfs.audit)
957 959
958 960 self._dirstatevalidatewarned = False
959 961
960 962 self._branchcaches = {}
961 963 self._revbranchcache = None
962 964 self._filterpats = {}
963 965 self._datafilters = {}
964 966 self._transref = self._lockref = self._wlockref = None
965 967
966 968 # A cache for various files under .hg/ that tracks file changes,
967 969 # (used by the filecache decorator)
968 970 #
969 971 # Maps a property name to its util.filecacheentry
970 972 self._filecache = {}
971 973
972 974 # hold sets of revision to be filtered
973 975 # should be cleared when something might have changed the filter value:
974 976 # - new changesets,
975 977 # - phase change,
976 978 # - new obsolescence marker,
977 979 # - working directory parent change,
978 980 # - bookmark changes
979 981 self.filteredrevcache = {}
980 982
981 983 # post-dirstate-status hooks
982 984 self._postdsstatus = []
983 985
984 986 # generic mapping between names and nodes
985 987 self.names = namespaces.namespaces()
986 988
987 989 # Key to signature value.
988 990 self._sparsesignaturecache = {}
989 991 # Signature to cached matcher instance.
990 992 self._sparsematchercache = {}
991 993
992 994 def _getvfsward(self, origfunc):
993 995 """build a ward for self.vfs"""
994 996 rref = weakref.ref(self)
995 997 def checkvfs(path, mode=None):
996 998 ret = origfunc(path, mode=mode)
997 999 repo = rref()
998 1000 if (repo is None
999 1001 or not util.safehasattr(repo, '_wlockref')
1000 1002 or not util.safehasattr(repo, '_lockref')):
1001 1003 return
1002 1004 if mode in (None, 'r', 'rb'):
1003 1005 return
1004 1006 if path.startswith(repo.path):
1005 1007 # truncate name relative to the repository (.hg)
1006 1008 path = path[len(repo.path) + 1:]
1007 1009 if path.startswith('cache/'):
1008 1010 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
1009 1011 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
1010 1012 if path.startswith('journal.'):
1011 1013 # journal is covered by 'lock'
1012 1014 if repo._currentlock(repo._lockref) is None:
1013 1015 repo.ui.develwarn('write with no lock: "%s"' % path,
1014 1016 stacklevel=2, config='check-locks')
1015 1017 elif repo._currentlock(repo._wlockref) is None:
1016 1018 # rest of vfs files are covered by 'wlock'
1017 1019 #
1018 1020 # exclude special files
1019 1021 for prefix in self._wlockfreeprefix:
1020 1022 if path.startswith(prefix):
1021 1023 return
1022 1024 repo.ui.develwarn('write with no wlock: "%s"' % path,
1023 1025 stacklevel=2, config='check-locks')
1024 1026 return ret
1025 1027 return checkvfs
1026 1028
1027 1029 def _getsvfsward(self, origfunc):
1028 1030 """build a ward for self.svfs"""
1029 1031 rref = weakref.ref(self)
1030 1032 def checksvfs(path, mode=None):
1031 1033 ret = origfunc(path, mode=mode)
1032 1034 repo = rref()
1033 1035 if repo is None or not util.safehasattr(repo, '_lockref'):
1034 1036 return
1035 1037 if mode in (None, 'r', 'rb'):
1036 1038 return
1037 1039 if path.startswith(repo.sharedpath):
1038 1040 # truncate name relative to the repository (.hg)
1039 1041 path = path[len(repo.sharedpath) + 1:]
1040 1042 if repo._currentlock(repo._lockref) is None:
1041 1043 repo.ui.develwarn('write with no lock: "%s"' % path,
1042 1044 stacklevel=3)
1043 1045 return ret
1044 1046 return checksvfs
1045 1047
1046 1048 def close(self):
1047 1049 self._writecaches()
1048 1050
1049 1051 def _writecaches(self):
1050 1052 if self._revbranchcache:
1051 1053 self._revbranchcache.write()
1052 1054
1053 1055 def _restrictcapabilities(self, caps):
1054 1056 if self.ui.configbool('experimental', 'bundle2-advertise'):
1055 1057 caps = set(caps)
1056 1058 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
1057 1059 role='client'))
1058 1060 caps.add('bundle2=' + urlreq.quote(capsblob))
1059 1061 return caps
1060 1062
1061 1063 def _writerequirements(self):
1062 1064 scmutil.writerequires(self.vfs, self.requirements)
1063 1065
1064 1066 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1065 1067 # self -> auditor -> self._checknested -> self
1066 1068
1067 1069 @property
1068 1070 def auditor(self):
1069 1071 # This is only used by context.workingctx.match in order to
1070 1072 # detect files in subrepos.
1071 1073 return pathutil.pathauditor(self.root, callback=self._checknested)
1072 1074
1073 1075 @property
1074 1076 def nofsauditor(self):
1075 1077 # This is only used by context.basectx.match in order to detect
1076 1078 # files in subrepos.
1077 1079 return pathutil.pathauditor(self.root, callback=self._checknested,
1078 1080 realfs=False, cached=True)
1079 1081
1080 1082 def _checknested(self, path):
1081 1083 """Determine if path is a legal nested repository."""
1082 1084 if not path.startswith(self.root):
1083 1085 return False
1084 1086 subpath = path[len(self.root) + 1:]
1085 1087 normsubpath = util.pconvert(subpath)
1086 1088
1087 1089 # XXX: Checking against the current working copy is wrong in
1088 1090 # the sense that it can reject things like
1089 1091 #
1090 1092 # $ hg cat -r 10 sub/x.txt
1091 1093 #
1092 1094 # if sub/ is no longer a subrepository in the working copy
1093 1095 # parent revision.
1094 1096 #
1095 1097 # However, it can of course also allow things that would have
1096 1098 # been rejected before, such as the above cat command if sub/
1097 1099 # is a subrepository now, but was a normal directory before.
1098 1100 # The old path auditor would have rejected by mistake since it
1099 1101 # panics when it sees sub/.hg/.
1100 1102 #
1101 1103 # All in all, checking against the working copy seems sensible
1102 1104 # since we want to prevent access to nested repositories on
1103 1105 # the filesystem *now*.
1104 1106 ctx = self[None]
1105 1107 parts = util.splitpath(subpath)
1106 1108 while parts:
1107 1109 prefix = '/'.join(parts)
1108 1110 if prefix in ctx.substate:
1109 1111 if prefix == normsubpath:
1110 1112 return True
1111 1113 else:
1112 1114 sub = ctx.sub(prefix)
1113 1115 return sub.checknested(subpath[len(prefix) + 1:])
1114 1116 else:
1115 1117 parts.pop()
1116 1118 return False
1117 1119
1118 1120 def peer(self):
1119 1121 return localpeer(self) # not cached to avoid reference cycle
1120 1122
1121 1123 def unfiltered(self):
1122 1124 """Return unfiltered version of the repository
1123 1125
1124 1126 Intended to be overwritten by filtered repo."""
1125 1127 return self
1126 1128
1127 1129 def filtered(self, name, visibilityexceptions=None):
1128 1130 """Return a filtered version of a repository"""
1129 1131 cls = repoview.newtype(self.unfiltered().__class__)
1130 1132 return cls(self, name, visibilityexceptions)
1131 1133
1132 1134 @repofilecache('bookmarks', 'bookmarks.current')
1133 1135 def _bookmarks(self):
1134 1136 return bookmarks.bmstore(self)
1135 1137
1136 1138 @property
1137 1139 def _activebookmark(self):
1138 1140 return self._bookmarks.active
1139 1141
1140 1142 # _phasesets depend on changelog. what we need is to call
1141 1143 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1142 1144 # can't be easily expressed in the filecache mechanism.
1143 1145 @storecache('phaseroots', '00changelog.i')
1144 1146 def _phasecache(self):
1145 1147 return phases.phasecache(self, self._phasedefaults)
1146 1148
1147 1149 @storecache('obsstore')
1148 1150 def obsstore(self):
1149 1151 return obsolete.makestore(self.ui, self)
1150 1152
1151 1153 @storecache('00changelog.i')
1152 1154 def changelog(self):
1153 1155 return changelog.changelog(self.svfs,
1154 1156 trypending=txnutil.mayhavepending(self.root))
1155 1157
1156 1158 @storecache('00manifest.i')
1157 1159 def manifestlog(self):
1158 1160 rootstore = manifest.manifestrevlog(self.svfs)
1159 1161 return manifest.manifestlog(self.svfs, self, rootstore)
1160 1162
1161 1163 @repofilecache('dirstate')
1162 1164 def dirstate(self):
1163 1165 return self._makedirstate()
1164 1166
1165 1167 def _makedirstate(self):
1166 1168 """Extension point for wrapping the dirstate per-repo."""
1167 1169 sparsematchfn = lambda: sparse.matcher(self)
1168 1170
1169 1171 return dirstate.dirstate(self.vfs, self.ui, self.root,
1170 1172 self._dirstatevalidate, sparsematchfn)
1171 1173
1172 1174 def _dirstatevalidate(self, node):
1173 1175 try:
1174 1176 self.changelog.rev(node)
1175 1177 return node
1176 1178 except error.LookupError:
1177 1179 if not self._dirstatevalidatewarned:
1178 1180 self._dirstatevalidatewarned = True
1179 1181 self.ui.warn(_("warning: ignoring unknown"
1180 1182 " working parent %s!\n") % short(node))
1181 1183 return nullid
1182 1184
1183 1185 @storecache(narrowspec.FILENAME)
1184 1186 def narrowpats(self):
1185 1187 """matcher patterns for this repository's narrowspec
1186 1188
1187 1189 A tuple of (includes, excludes).
1188 1190 """
1189 1191 return narrowspec.load(self)
1190 1192
1191 1193 @storecache(narrowspec.FILENAME)
1192 1194 def _narrowmatch(self):
1193 1195 if repository.NARROW_REQUIREMENT not in self.requirements:
1194 1196 return matchmod.always(self.root, '')
1195 1197 include, exclude = self.narrowpats
1196 1198 return narrowspec.match(self.root, include=include, exclude=exclude)
1197 1199
1198 1200 # TODO(martinvonz): make this property-like instead?
1199 1201 def narrowmatch(self):
1200 1202 return self._narrowmatch
1201 1203
1202 1204 def setnarrowpats(self, newincludes, newexcludes):
1203 1205 narrowspec.save(self, newincludes, newexcludes)
1204 1206 self.invalidate(clearfilecache=True)
1205 1207
1206 1208 def __getitem__(self, changeid):
1207 1209 if changeid is None:
1208 1210 return context.workingctx(self)
1209 1211 if isinstance(changeid, context.basectx):
1210 1212 return changeid
1211 1213 if isinstance(changeid, slice):
1212 1214 # wdirrev isn't contiguous so the slice shouldn't include it
1213 1215 return [self[i]
1214 1216 for i in pycompat.xrange(*changeid.indices(len(self)))
1215 1217 if i not in self.changelog.filteredrevs]
1216 1218 try:
1217 return context.changectx(self, changeid)
1219 if isinstance(changeid, int):
1220 node = self.changelog.node(changeid)
1221 rev = changeid
1222 return context.changectx(self, rev, node)
1223 elif changeid == 'null':
1224 node = nullid
1225 rev = nullrev
1226 return context.changectx(self, rev, node)
1227 elif changeid == 'tip':
1228 node = self.changelog.tip()
1229 rev = self.changelog.rev(node)
1230 return context.changectx(self, rev, node)
1231 elif (changeid == '.'
1232 or self.local() and changeid == self.dirstate.p1()):
1233 # this is a hack to delay/avoid loading obsmarkers
1234 # when we know that '.' won't be hidden
1235 node = self.dirstate.p1()
1236 rev = self.unfiltered().changelog.rev(node)
1237 return context.changectx(self, rev, node)
1238 elif len(changeid) == 20:
1239 try:
1240 node = changeid
1241 rev = self.changelog.rev(changeid)
1242 return context.changectx(self, rev, node)
1243 except error.FilteredLookupError:
1244 changeid = hex(changeid) # for the error message
1245 raise
1246 except LookupError:
1247 # check if it might have come from damaged dirstate
1248 #
1249 # XXX we could avoid the unfiltered if we had a recognizable
1250 # exception for filtered changeset access
1251 if (self.local()
1252 and changeid in self.unfiltered().dirstate.parents()):
1253 msg = _("working directory has unknown parent '%s'!")
1254 raise error.Abort(msg % short(changeid))
1255 changeid = hex(changeid) # for the error message
1256
1257 elif len(changeid) == 40:
1258 try:
1259 node = bin(changeid)
1260 rev = self.changelog.rev(node)
1261 return context.changectx(self, rev, node)
1262 except error.FilteredLookupError:
1263 raise
1264 except LookupError:
1265 pass
1266 else:
1267 raise error.ProgrammingError(
1268 "unsupported changeid '%s' of type %s" %
1269 (changeid, type(changeid)))
1270
1271 except (error.FilteredIndexError, error.FilteredLookupError):
1272 raise error.FilteredRepoLookupError(_("filtered revision '%s'")
1273 % pycompat.bytestr(changeid))
1274 except IndexError:
1275 pass
1218 1276 except error.WdirUnsupported:
1219 1277 return context.workingctx(self)
1278 raise error.RepoLookupError(
1279 _("unknown revision '%s'") % changeid)
1220 1280
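    # Illustrative usage (editor's addition) of the lookup forms handled
    # above; the values are examples:
    #
    #   repo[None]   -> workingctx for the working directory
    #   repo[0]      -> changectx for revision number 0
    #   repo['null'] -> the null changeset
    #   repo['tip']  -> the repository tip
    #   repo['.']    -> the first parent of the working directory
    #   repo[node]   -> lookup by 20-byte binary or 40-char hex node
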
1221 1281 def __contains__(self, changeid):
1222 1282 """True if the given changeid exists
1223 1283
1224 1284 error.AmbiguousPrefixLookupError is raised if an ambiguous node is
1225 1285 specified.
1226 1286 """
1227 1287 try:
1228 1288 self[changeid]
1229 1289 return True
1230 1290 except error.RepoLookupError:
1231 1291 return False
1232 1292
1233 1293 def __nonzero__(self):
1234 1294 return True
1235 1295
1236 1296 __bool__ = __nonzero__
1237 1297
1238 1298 def __len__(self):
1239 1299 # no need to pay the cost of repoview.changelog
1240 1300 unfi = self.unfiltered()
1241 1301 return len(unfi.changelog)
1242 1302
1243 1303 def __iter__(self):
1244 1304 return iter(self.changelog)
1245 1305
1246 1306 def revs(self, expr, *args):
1247 1307 '''Find revisions matching a revset.
1248 1308
1249 1309 The revset is specified as a string ``expr`` that may contain
1250 1310 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1251 1311
1252 1312 Revset aliases from the configuration are not expanded. To expand
1253 1313 user aliases, consider calling ``scmutil.revrange()`` or
1254 1314 ``repo.anyrevs([expr], user=True)``.
1255 1315
1256 1316 Returns a revset.abstractsmartset, which is a list-like interface
1257 1317 that contains integer revisions.
1258 1318 '''
1259 1319 expr = revsetlang.formatspec(expr, *args)
1260 1320 m = revset.match(None, expr)
1261 1321 return m(self)
1262 1322
1263 1323 def set(self, expr, *args):
1264 1324 '''Find revisions matching a revset and emit changectx instances.
1265 1325
1266 1326 This is a convenience wrapper around ``revs()`` that iterates the
1267 1327 result and is a generator of changectx instances.
1268 1328
1269 1329 Revset aliases from the configuration are not expanded. To expand
1270 1330 user aliases, consider calling ``scmutil.revrange()``.
1271 1331 '''
1272 1332 for r in self.revs(expr, *args):
1273 1333 yield self[r]
1274 1334
1275 1335 def anyrevs(self, specs, user=False, localalias=None):
1276 1336 '''Find revisions matching one of the given revsets.
1277 1337
1278 1338 Revset aliases from the configuration are not expanded by default. To
1279 1339 expand user aliases, specify ``user=True``. To provide some local
1280 1340 definitions overriding user aliases, set ``localalias`` to
1281 1341 ``{name: definitionstring}``.
1282 1342 '''
1283 1343 if user:
1284 1344 m = revset.matchany(self.ui, specs,
1285 1345 lookup=revset.lookupfn(self),
1286 1346 localalias=localalias)
1287 1347 else:
1288 1348 m = revset.matchany(None, specs, localalias=localalias)
1289 1349 return m(self)
1290 1350
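    # Illustrative usage (editor's addition) of the revset helpers above;
    # the expressions are examples:
    #
    #   repo.revs('%d::%d', 0, 5)                  # smartset of revnums
    #   [ctx.hex() for ctx in repo.set('head()')]  # changectx generator
    #   repo.anyrevs(['tip', '.'], user=True)      # expands user aliases
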
1291 1351 def url(self):
1292 1352 return 'file:' + self.root
1293 1353
1294 1354 def hook(self, name, throw=False, **args):
1295 1355 """Call a hook, passing this repo instance.
1296 1356
1297 1357 This is a convenience method to aid invoking hooks. Extensions likely
1298 1358 won't call this unless they have registered a custom hook or are
1299 1359 replacing code that is expected to call a hook.
1300 1360 """
1301 1361 return hook.hook(self.ui, self, name, throw, **args)
1302 1362
1303 1363 @filteredpropertycache
1304 1364 def _tagscache(self):
1305 1365 '''Returns a tagscache object that contains various tags related
1306 1366 caches.'''
1307 1367
1308 1368 # This simplifies its cache management by having one decorated
1309 1369 # function (this one) and the rest simply fetch things from it.
1310 1370 class tagscache(object):
1311 1371 def __init__(self):
1312 1372 # These two define the set of tags for this repository. tags
1313 1373 # maps tag name to node; tagtypes maps tag name to 'global' or
1314 1374 # 'local'. (Global tags are defined by .hgtags across all
1315 1375 # heads, and local tags are defined in .hg/localtags.)
1316 1376 # They constitute the in-memory cache of tags.
1317 1377 self.tags = self.tagtypes = None
1318 1378
1319 1379 self.nodetagscache = self.tagslist = None
1320 1380
1321 1381 cache = tagscache()
1322 1382 cache.tags, cache.tagtypes = self._findtags()
1323 1383
1324 1384 return cache
1325 1385
1326 1386 def tags(self):
1327 1387 '''return a mapping of tag to node'''
1328 1388 t = {}
1329 1389 if self.changelog.filteredrevs:
1330 1390 tags, tt = self._findtags()
1331 1391 else:
1332 1392 tags = self._tagscache.tags
1333 1393 for k, v in tags.iteritems():
1334 1394 try:
1335 1395 # ignore tags to unknown nodes
1336 1396 self.changelog.rev(v)
1337 1397 t[k] = v
1338 1398 except (error.LookupError, ValueError):
1339 1399 pass
1340 1400 return t
1341 1401
1342 1402 def _findtags(self):
1343 1403 '''Do the hard work of finding tags. Return a pair of dicts
1344 1404 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1345 1405 maps tag name to a string like \'global\' or \'local\'.
1346 1406 Subclasses or extensions are free to add their own tags, but
1347 1407 should be aware that the returned dicts will be retained for the
1348 1408 duration of the localrepo object.'''
1349 1409
1350 1410 # XXX what tagtype should subclasses/extensions use? Currently
1351 1411 # mq and bookmarks add tags, but do not set the tagtype at all.
1352 1412 # Should each extension invent its own tag type? Should there
1353 1413 # be one tagtype for all such "virtual" tags? Or is the status
1354 1414 # quo fine?
1355 1415
1356 1416
1357 1417 # map tag name to (node, hist)
1358 1418 alltags = tagsmod.findglobaltags(self.ui, self)
1359 1419 # map tag name to tag type
1360 1420 tagtypes = dict((tag, 'global') for tag in alltags)
1361 1421
1362 1422 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1363 1423
1364 1424 # Build the return dicts. Have to re-encode tag names because
1365 1425 # the tags module always uses UTF-8 (in order not to lose info
1366 1426 # writing to the cache), but the rest of Mercurial wants them in
1367 1427 # local encoding.
1368 1428 tags = {}
1369 1429 for (name, (node, hist)) in alltags.iteritems():
1370 1430 if node != nullid:
1371 1431 tags[encoding.tolocal(name)] = node
1372 1432 tags['tip'] = self.changelog.tip()
1373 1433 tagtypes = dict([(encoding.tolocal(name), value)
1374 1434 for (name, value) in tagtypes.iteritems()])
1375 1435 return (tags, tagtypes)
1376 1436
1377 1437 def tagtype(self, tagname):
1378 1438 '''
1379 1439 return the type of the given tag. result can be:
1380 1440
1381 1441 'local' : a local tag
1382 1442 'global' : a global tag
1383 1443 None : tag does not exist
1384 1444 '''
1385 1445
1386 1446 return self._tagscache.tagtypes.get(tagname)
1387 1447
1388 1448 def tagslist(self):
1389 1449 '''return a list of tags ordered by revision'''
1390 1450 if not self._tagscache.tagslist:
1391 1451 l = []
1392 1452 for t, n in self.tags().iteritems():
1393 1453 l.append((self.changelog.rev(n), t, n))
1394 1454 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1395 1455
1396 1456 return self._tagscache.tagslist
1397 1457
1398 1458 def nodetags(self, node):
1399 1459 '''return the tags associated with a node'''
1400 1460 if not self._tagscache.nodetagscache:
1401 1461 nodetagscache = {}
1402 1462 for t, n in self._tagscache.tags.iteritems():
1403 1463 nodetagscache.setdefault(n, []).append(t)
1404 1464 for tags in nodetagscache.itervalues():
1405 1465 tags.sort()
1406 1466 self._tagscache.nodetagscache = nodetagscache
1407 1467 return self._tagscache.nodetagscache.get(node, [])
1408 1468
1409 1469 def nodebookmarks(self, node):
1410 1470 """return the list of bookmarks pointing to the specified node"""
1411 1471 return self._bookmarks.names(node)
1412 1472
1413 1473 def branchmap(self):
1414 1474 '''returns a dictionary {branch: [branchheads]} with branchheads
1415 1475 ordered by increasing revision number'''
1416 1476 branchmap.updatecache(self)
1417 1477 return self._branchcaches[self.filtername]
1418 1478
1419 1479 @unfilteredmethod
1420 1480 def revbranchcache(self):
1421 1481 if not self._revbranchcache:
1422 1482 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1423 1483 return self._revbranchcache
1424 1484
1425 1485 def branchtip(self, branch, ignoremissing=False):
1426 1486 '''return the tip node for a given branch
1427 1487
1428 1488 If ignoremissing is True, then this method will not raise an error.
1429 1489 This is helpful for callers that only expect None for a missing branch
1430 1490 (e.g. namespace).
1431 1491
1432 1492 '''
1433 1493 try:
1434 1494 return self.branchmap().branchtip(branch)
1435 1495 except KeyError:
1436 1496 if not ignoremissing:
1437 1497 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1438 1498 else:
1439 1499 pass
1440 1500
1441 1501 def lookup(self, key):
1442 1502 return scmutil.revsymbol(self, key).node()
1443 1503
1444 1504 def lookupbranch(self, key):
1445 1505 if key in self.branchmap():
1446 1506 return key
1447 1507
1448 1508 return scmutil.revsymbol(self, key).branch()
1449 1509
1450 1510 def known(self, nodes):
1451 1511 cl = self.changelog
1452 1512 nm = cl.nodemap
1453 1513 filtered = cl.filteredrevs
1454 1514 result = []
1455 1515 for n in nodes:
1456 1516 r = nm.get(n)
1457 1517 resp = not (r is None or r in filtered)
1458 1518 result.append(resp)
1459 1519 return result
1460 1520
1461 1521 def local(self):
1462 1522 return self
1463 1523
1464 1524 def publishing(self):
1465 1525 # it's safe (and desirable) to trust the publish flag unconditionally
1466 1526 # so that we don't finalize changes shared between users via ssh or nfs
1467 1527 return self.ui.configbool('phases', 'publish', untrusted=True)
1468 1528
1469 1529 def cancopy(self):
1470 1530 # so statichttprepo's override of local() works
1471 1531 if not self.local():
1472 1532 return False
1473 1533 if not self.publishing():
1474 1534 return True
1475 1535 # if publishing we can't copy if there is filtered content
1476 1536 return not self.filtered('visible').changelog.filteredrevs
1477 1537
1478 1538 def shared(self):
1479 1539 '''the type of shared repository (None if not shared)'''
1480 1540 if self.sharedpath != self.path:
1481 1541 return 'store'
1482 1542 return None
1483 1543
1484 1544 def wjoin(self, f, *insidef):
1485 1545 return self.vfs.reljoin(self.root, f, *insidef)
1486 1546
1487 1547 def setparents(self, p1, p2=nullid):
1488 1548 with self.dirstate.parentchange():
1489 1549 copies = self.dirstate.setparents(p1, p2)
1490 1550 pctx = self[p1]
1491 1551 if copies:
1492 1552 # Adjust copy records; the dirstate cannot do it, as it
1493 1553 # requires access to the parents' manifests. Preserve them
1494 1554 # only for entries added to the first parent.
1495 1555 for f in copies:
1496 1556 if f not in pctx and copies[f] in pctx:
1497 1557 self.dirstate.copy(copies[f], f)
1498 1558 if p2 == nullid:
1499 1559 for f, s in sorted(self.dirstate.copies().items()):
1500 1560 if f not in pctx and s not in pctx:
1501 1561 self.dirstate.copy(None, f)
1502 1562
1503 1563 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1504 1564 """changeid can be a changeset revision, node, or tag.
1505 1565 fileid can be a file revision or node."""
1506 1566 return context.filectx(self, path, changeid, fileid,
1507 1567 changectx=changectx)
1508 1568
1509 1569 def getcwd(self):
1510 1570 return self.dirstate.getcwd()
1511 1571
1512 1572 def pathto(self, f, cwd=None):
1513 1573 return self.dirstate.pathto(f, cwd)
1514 1574
1515 1575 def _loadfilter(self, filter):
1516 1576 if filter not in self._filterpats:
1517 1577 l = []
1518 1578 for pat, cmd in self.ui.configitems(filter):
1519 1579 if cmd == '!':
1520 1580 continue
1521 1581 mf = matchmod.match(self.root, '', [pat])
1522 1582 fn = None
1523 1583 params = cmd
1524 1584 for name, filterfn in self._datafilters.iteritems():
1525 1585 if cmd.startswith(name):
1526 1586 fn = filterfn
1527 1587 params = cmd[len(name):].lstrip()
1528 1588 break
1529 1589 if not fn:
1530 1590 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1531 1591 # Wrap old filters not supporting keyword arguments
1532 1592 if not pycompat.getargspec(fn)[2]:
1533 1593 oldfn = fn
1534 1594 fn = lambda s, c, **kwargs: oldfn(s, c)
1535 1595 l.append((mf, fn, params))
1536 1596 self._filterpats[filter] = l
1537 1597 return self._filterpats[filter]
1538 1598
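    # Illustrative hgrc snippet (editor's addition) for the [encode]/[decode]
    # filters loaded above; pattern and commands are examples (a shell command
    # is run via procutil.filter unless a registered data filter matches):
    #
    #   [encode]
    #   *.txt = tempfile: unix2dos -n INFILE OUTFILE
    #   [decode]
    #   *.txt = tempfile: dos2unix -n INFILE OUTFILE
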
1539 1599 def _filter(self, filterpats, filename, data):
1540 1600 for mf, fn, cmd in filterpats:
1541 1601 if mf(filename):
1542 1602 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1543 1603 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1544 1604 break
1545 1605
1546 1606 return data
1547 1607
1548 1608 @unfilteredpropertycache
1549 1609 def _encodefilterpats(self):
1550 1610 return self._loadfilter('encode')
1551 1611
1552 1612 @unfilteredpropertycache
1553 1613 def _decodefilterpats(self):
1554 1614 return self._loadfilter('decode')
1555 1615
1556 1616 def adddatafilter(self, name, filter):
1557 1617 self._datafilters[name] = filter
1558 1618
1559 1619 def wread(self, filename):
1560 1620 if self.wvfs.islink(filename):
1561 1621 data = self.wvfs.readlink(filename)
1562 1622 else:
1563 1623 data = self.wvfs.read(filename)
1564 1624 return self._filter(self._encodefilterpats, filename, data)
1565 1625
1566 1626 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1567 1627 """write ``data`` into ``filename`` in the working directory
1568 1628
1569 1629 This returns length of written (maybe decoded) data.
1570 1630 """
1571 1631 data = self._filter(self._decodefilterpats, filename, data)
1572 1632 if 'l' in flags:
1573 1633 self.wvfs.symlink(data, filename)
1574 1634 else:
1575 1635 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1576 1636 **kwargs)
1577 1637 if 'x' in flags:
1578 1638 self.wvfs.setflags(filename, False, True)
1579 1639 else:
1580 1640 self.wvfs.setflags(filename, False, False)
1581 1641 return len(data)
1582 1642
1583 1643 def wwritedata(self, filename, data):
1584 1644 return self._filter(self._decodefilterpats, filename, data)
1585 1645
1586 1646 def currenttransaction(self):
1587 1647 """return the current transaction or None if none exists"""
1588 1648 if self._transref:
1589 1649 tr = self._transref()
1590 1650 else:
1591 1651 tr = None
1592 1652
1593 1653 if tr and tr.running():
1594 1654 return tr
1595 1655 return None
1596 1656
1597 1657 def transaction(self, desc, report=None):
1598 1658 if (self.ui.configbool('devel', 'all-warnings')
1599 1659 or self.ui.configbool('devel', 'check-locks')):
1600 1660 if self._currentlock(self._lockref) is None:
1601 1661 raise error.ProgrammingError('transaction requires locking')
1602 1662 tr = self.currenttransaction()
1603 1663 if tr is not None:
1604 1664 return tr.nest(name=desc)
1605 1665
1606 1666 # abort here if the journal already exists
1607 1667 if self.svfs.exists("journal"):
1608 1668 raise error.RepoError(
1609 1669 _("abandoned transaction found"),
1610 1670 hint=_("run 'hg recover' to clean up transaction"))
1611 1671
1612 1672 idbase = "%.40f#%f" % (random.random(), time.time())
1613 1673 ha = hex(hashlib.sha1(idbase).digest())
1614 1674 txnid = 'TXN:' + ha
1615 1675 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1616 1676
1617 1677 self._writejournal(desc)
1618 1678 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1619 1679 if report:
1620 1680 rp = report
1621 1681 else:
1622 1682 rp = self.ui.warn
1623 1683 vfsmap = {'plain': self.vfs} # root of .hg/
1624 1684 # we must avoid a cyclic reference between repo and transaction.
1625 1685 reporef = weakref.ref(self)
1626 1686 # Code to track tag movement
1627 1687 #
1628 1688 # Since tags are all handled as file content, it is actually quite hard
1629 1689 # to track these movement from a code perspective. So we fallback to a
1630 1690 # tracking at the repository level. One could envision to track changes
1631 1691 # to the '.hgtags' file through changegroup apply but that fails to
1632 1692 # cope with case where transaction expose new heads without changegroup
1633 1693 # being involved (eg: phase movement).
1634 1694 #
1635 1695 # For now, We gate the feature behind a flag since this likely comes
1636 1696 # with performance impacts. The current code run more often than needed
1637 1697 # and do not use caches as much as it could. The current focus is on
1638 1698 # the behavior of the feature so we disable it by default. The flag
1639 1699 # will be removed when we are happy with the performance impact.
1640 1700 #
1641 1701 # Once this feature is no longer experimental move the following
1642 1702 # documentation to the appropriate help section:
1643 1703 #
1644 1704 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1645 1705 # tags (new or changed or deleted tags). In addition the details of
1646 1706 # these changes are made available in a file at:
1647 1707 # ``REPOROOT/.hg/changes/tags.changes``.
1648 1708 # Make sure you check for HG_TAG_MOVED before reading that file as it
1649 1709 # might exist from a previous transaction even if no tags were touched
1650 1710 # in this one. Changes are recorded in a line-based format::
1651 1711 #
1652 1712 # <action> <hex-node> <tag-name>\n
1653 1713 #
1654 1714 # Actions are defined as follows:
1655 1715 # "-R": tag is removed,
1656 1716 # "+A": tag is added,
1657 1717 # "-M": tag is moved (old value),
1658 1718 # "+M": tag is moved (new value),
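        #
        # For example (editor's illustration, hypothetical nodes), a
        # transaction moving tag "v1.0" and adding tag "v2.0" would record:
        #
        #   -M 6161616161616161616161616161616161616161 v1.0
        #   +M 6262626262626262626262626262626262626262 v1.0
        #   +A 6363636363636363636363636363636363636363 v2.0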
1659 1719 tracktags = lambda x: None
1660 1720 # experimental config: experimental.hook-track-tags
1661 1721 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1662 1722 if desc != 'strip' and shouldtracktags:
1663 1723 oldheads = self.changelog.headrevs()
1664 1724 def tracktags(tr2):
1665 1725 repo = reporef()
1666 1726 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1667 1727 newheads = repo.changelog.headrevs()
1668 1728 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1669 1729 # note: we compare lists here.
1670 1730 # As we do it only once, building a set would not be cheaper
1671 1731 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1672 1732 if changes:
1673 1733 tr2.hookargs['tag_moved'] = '1'
1674 1734 with repo.vfs('changes/tags.changes', 'w',
1675 1735 atomictemp=True) as changesfile:
1676 1736 # note: we do not register the file with the transaction
1677 1737 # because we need it to still exist when the transaction
1678 1738 # is closed (for txnclose hooks)
1679 1739 tagsmod.writediff(changesfile, changes)
1680 1740 def validate(tr2):
1681 1741 """will run pre-closing hooks"""
1682 1742 # XXX the transaction API is a bit lacking here so we take a hacky
1683 1743 # path for now
1684 1744 #
1685 1745 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1686 1746 # dict is copied before these run. In addition we need the data
1687 1747 # available to in-memory hooks too.
1688 1748 #
1689 1749 # Moreover, we also need to make sure this runs before txnclose
1690 1750 # hooks and there is no "pending" mechanism that would execute
1691 1751 # logic only if hooks are about to run.
1692 1752 #
1693 1753 # Fixing this limitation of the transaction is also needed to track
1694 1754 # other families of changes (bookmarks, phases, obsolescence).
1695 1755 #
1696 1756 # This will have to be fixed before we remove the experimental
1697 1757 # gating.
1698 1758 tracktags(tr2)
1699 1759 repo = reporef()
1700 1760 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1701 1761 scmutil.enforcesinglehead(repo, tr2, desc)
1702 1762 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1703 1763 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1704 1764 args = tr.hookargs.copy()
1705 1765 args.update(bookmarks.preparehookargs(name, old, new))
1706 1766 repo.hook('pretxnclose-bookmark', throw=True,
1707 1767 txnname=desc,
1708 1768 **pycompat.strkwargs(args))
1709 1769 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1710 1770 cl = repo.unfiltered().changelog
1711 1771 for rev, (old, new) in tr.changes['phases'].items():
1712 1772 args = tr.hookargs.copy()
1713 1773 node = hex(cl.node(rev))
1714 1774 args.update(phases.preparehookargs(node, old, new))
1715 1775 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1716 1776 **pycompat.strkwargs(args))
1717 1777
1718 1778 repo.hook('pretxnclose', throw=True,
1719 1779 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1720 1780 def releasefn(tr, success):
1721 1781 repo = reporef()
1722 1782 if success:
1723 1783 # this should be explicitly invoked here, because
1724 1784 # in-memory changes aren't written out at closing
1725 1785 # transaction, if tr.addfilegenerator (via
1726 1786 # dirstate.write or so) isn't invoked while
1727 1787 # transaction running
1728 1788 repo.dirstate.write(None)
1729 1789 else:
1730 1790 # discard all changes (including ones already written
1731 1791 # out) in this transaction
1732 1792 narrowspec.restorebackup(self, 'journal.narrowspec')
1733 1793 repo.dirstate.restorebackup(None, 'journal.dirstate')
1734 1794
1735 1795 repo.invalidate(clearfilecache=True)
1736 1796
1737 1797 tr = transaction.transaction(rp, self.svfs, vfsmap,
1738 1798 "journal",
1739 1799 "undo",
1740 1800 aftertrans(renames),
1741 1801 self.store.createmode,
1742 1802 validator=validate,
1743 1803 releasefn=releasefn,
1744 1804 checkambigfiles=_cachedfiles,
1745 1805 name=desc)
1746 1806 tr.changes['origrepolen'] = len(self)
1747 1807 tr.changes['obsmarkers'] = set()
1748 1808 tr.changes['phases'] = {}
1749 1809 tr.changes['bookmarks'] = {}
1750 1810
1751 1811 tr.hookargs['txnid'] = txnid
1752 1812 # note: writing the fncache only during finalize means that the file is
1753 1813 # outdated when running hooks. As fncache is used for streaming clone,
1754 1814 # this is not expected to break anything that happens during the hooks.
1755 1815 tr.addfinalize('flush-fncache', self.store.write)
1756 1816 def txnclosehook(tr2):
1757 1817 """To be run if transaction is successful, will schedule a hook run
1758 1818 """
1759 1819 # Don't reference tr2 in hook() so we don't hold a reference.
1760 1820 # This reduces memory consumption when there are multiple
1761 1821 # transactions per lock. This can likely go away if issue5045
1762 1822 # fixes the function accumulation.
1763 1823 hookargs = tr2.hookargs
1764 1824
1765 1825 def hookfunc():
1766 1826 repo = reporef()
1767 1827 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1768 1828 bmchanges = sorted(tr.changes['bookmarks'].items())
1769 1829 for name, (old, new) in bmchanges:
1770 1830 args = tr.hookargs.copy()
1771 1831 args.update(bookmarks.preparehookargs(name, old, new))
1772 1832 repo.hook('txnclose-bookmark', throw=False,
1773 1833 txnname=desc, **pycompat.strkwargs(args))
1774 1834
1775 1835 if hook.hashook(repo.ui, 'txnclose-phase'):
1776 1836 cl = repo.unfiltered().changelog
1777 1837 phasemv = sorted(tr.changes['phases'].items())
1778 1838 for rev, (old, new) in phasemv:
1779 1839 args = tr.hookargs.copy()
1780 1840 node = hex(cl.node(rev))
1781 1841 args.update(phases.preparehookargs(node, old, new))
1782 1842 repo.hook('txnclose-phase', throw=False, txnname=desc,
1783 1843 **pycompat.strkwargs(args))
1784 1844
1785 1845 repo.hook('txnclose', throw=False, txnname=desc,
1786 1846 **pycompat.strkwargs(hookargs))
1787 1847 reporef()._afterlock(hookfunc)
1788 1848 tr.addfinalize('txnclose-hook', txnclosehook)
1789 1849 # Include a leading "-" to make it happen before the transaction summary
1790 1850 # reports registered via scmutil.registersummarycallback() whose names
1791 1851 # are 00-txnreport etc. That way, the caches will be warm when the
1792 1852 # callbacks run.
1793 1853 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1794 1854 def txnaborthook(tr2):
1795 1855 """To be run if transaction is aborted
1796 1856 """
1797 1857 reporef().hook('txnabort', throw=False, txnname=desc,
1798 1858 **pycompat.strkwargs(tr2.hookargs))
1799 1859 tr.addabort('txnabort-hook', txnaborthook)
1800 1860 # avoid eager cache invalidation. in-memory data should be identical
1801 1861 # to stored data if transaction has no error.
1802 1862 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1803 1863 self._transref = weakref.ref(tr)
1804 1864 scmutil.registersummarycallback(self, tr, desc)
1805 1865 return tr
1806 1866
1807 1867 def _journalfiles(self):
1808 1868 return ((self.svfs, 'journal'),
1809 1869 (self.vfs, 'journal.dirstate'),
1810 1870 (self.vfs, 'journal.branch'),
1811 1871 (self.vfs, 'journal.desc'),
1812 1872 (self.vfs, 'journal.bookmarks'),
1813 1873 (self.svfs, 'journal.phaseroots'))
1814 1874
1815 1875 def undofiles(self):
1816 1876 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1817 1877
1818 1878 @unfilteredmethod
1819 1879 def _writejournal(self, desc):
1820 1880 self.dirstate.savebackup(None, 'journal.dirstate')
1821 1881 narrowspec.savebackup(self, 'journal.narrowspec')
1822 1882 self.vfs.write("journal.branch",
1823 1883 encoding.fromlocal(self.dirstate.branch()))
1824 1884 self.vfs.write("journal.desc",
1825 1885 "%d\n%s\n" % (len(self), desc))
1826 1886 self.vfs.write("journal.bookmarks",
1827 1887 self.vfs.tryread("bookmarks"))
1828 1888 self.svfs.write("journal.phaseroots",
1829 1889 self.svfs.tryread("phaseroots"))
1830 1890
1831 1891 def recover(self):
1832 1892 with self.lock():
1833 1893 if self.svfs.exists("journal"):
1834 1894 self.ui.status(_("rolling back interrupted transaction\n"))
1835 1895 vfsmap = {'': self.svfs,
1836 1896 'plain': self.vfs,}
1837 1897 transaction.rollback(self.svfs, vfsmap, "journal",
1838 1898 self.ui.warn,
1839 1899 checkambigfiles=_cachedfiles)
1840 1900 self.invalidate()
1841 1901 return True
1842 1902 else:
1843 1903 self.ui.warn(_("no interrupted transaction available\n"))
1844 1904 return False
1845 1905
1846 1906 def rollback(self, dryrun=False, force=False):
1847 1907 wlock = lock = dsguard = None
1848 1908 try:
1849 1909 wlock = self.wlock()
1850 1910 lock = self.lock()
1851 1911 if self.svfs.exists("undo"):
1852 1912 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1853 1913
1854 1914 return self._rollback(dryrun, force, dsguard)
1855 1915 else:
1856 1916 self.ui.warn(_("no rollback information available\n"))
1857 1917 return 1
1858 1918 finally:
1859 1919 release(dsguard, lock, wlock)
1860 1920
1861 1921 @unfilteredmethod # Until we get smarter cache management
1862 1922 def _rollback(self, dryrun, force, dsguard):
1863 1923 ui = self.ui
1864 1924 try:
1865 1925 args = self.vfs.read('undo.desc').splitlines()
1866 1926 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1867 1927 if len(args) >= 3:
1868 1928 detail = args[2]
1869 1929 oldtip = oldlen - 1
1870 1930
1871 1931 if detail and ui.verbose:
1872 1932 msg = (_('repository tip rolled back to revision %d'
1873 1933 ' (undo %s: %s)\n')
1874 1934 % (oldtip, desc, detail))
1875 1935 else:
1876 1936 msg = (_('repository tip rolled back to revision %d'
1877 1937 ' (undo %s)\n')
1878 1938 % (oldtip, desc))
1879 1939 except IOError:
1880 1940 msg = _('rolling back unknown transaction\n')
1881 1941 desc = None
1882 1942
1883 1943 if not force and self['.'] != self['tip'] and desc == 'commit':
1884 1944 raise error.Abort(
1885 1945 _('rollback of last commit while not checked out '
1886 1946 'may lose data'), hint=_('use -f to force'))
1887 1947
1888 1948 ui.status(msg)
1889 1949 if dryrun:
1890 1950 return 0
1891 1951
1892 1952 parents = self.dirstate.parents()
1893 1953 self.destroying()
1894 1954 vfsmap = {'plain': self.vfs, '': self.svfs}
1895 1955 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1896 1956 checkambigfiles=_cachedfiles)
1897 1957 if self.vfs.exists('undo.bookmarks'):
1898 1958 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1899 1959 if self.svfs.exists('undo.phaseroots'):
1900 1960 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1901 1961 self.invalidate()
1902 1962
1903 1963 parentgone = (parents[0] not in self.changelog.nodemap or
1904 1964 parents[1] not in self.changelog.nodemap)
1905 1965 if parentgone:
1906 1966 # prevent dirstateguard from overwriting already restored one
1907 1967 dsguard.close()
1908 1968
1909 1969 narrowspec.restorebackup(self, 'undo.narrowspec')
1910 1970 self.dirstate.restorebackup(None, 'undo.dirstate')
1911 1971 try:
1912 1972 branch = self.vfs.read('undo.branch')
1913 1973 self.dirstate.setbranch(encoding.tolocal(branch))
1914 1974 except IOError:
1915 1975 ui.warn(_('named branch could not be reset: '
1916 1976 'current branch is still \'%s\'\n')
1917 1977 % self.dirstate.branch())
1918 1978
1919 1979 parents = tuple([p.rev() for p in self[None].parents()])
1920 1980 if len(parents) > 1:
1921 1981 ui.status(_('working directory now based on '
1922 1982 'revisions %d and %d\n') % parents)
1923 1983 else:
1924 1984 ui.status(_('working directory now based on '
1925 1985 'revision %d\n') % parents)
1926 1986 mergemod.mergestate.clean(self, self['.'].node())
1927 1987
1928 1988 # TODO: if we know which new heads may result from this rollback, pass
1929 1989 # them to destroy(), which will prevent the branchhead cache from being
1930 1990 # invalidated.
1931 1991 self.destroyed()
1932 1992 return 0
1933 1993
1934 1994 def _buildcacheupdater(self, newtransaction):
1935 1995 """called during transaction to build the callback updating cache
1936 1996
1937 1997 Lives on the repository to help extensions that might want to augment
1938 1998 this logic. For this purpose, the created transaction is passed to the
1939 1999 method.
1940 2000 """
1941 2001 # we must avoid a cyclic reference between repo and transaction.
1942 2002 reporef = weakref.ref(self)
1943 2003 def updater(tr):
1944 2004 repo = reporef()
1945 2005 repo.updatecaches(tr)
1946 2006 return updater
1947 2007
1948 2008 @unfilteredmethod
1949 2009 def updatecaches(self, tr=None, full=False):
1950 2010 """warm appropriate caches
1951 2011
1952 2012 If this function is called after a transaction was closed, the
1953 2013 transaction will be available in the 'tr' argument. This can be used to
1954 2014 selectively update caches relevant to the changes in that transaction.
1955 2015
1956 2016 If 'full' is set, make sure all caches the function knows about have
1957 2017 up-to-date data, even the ones usually loaded more lazily.
1958 2018 """
1959 2019 if tr is not None and tr.hookargs.get('source') == 'strip':
1960 2020 # During strip, many caches are invalid, but a
1961 2021 # later call to `destroyed` will refresh them.
1962 2022 return
1963 2023
1964 2024 if tr is None or tr.changes['origrepolen'] < len(self):
1965 2025 # updating the unfiltered branchmap should refresh all the others.
1966 2026 self.ui.debug('updating the branch cache\n')
1967 2027 branchmap.updatecache(self.filtered('served'))
1968 2028
1969 2029 if full:
1970 2030 rbc = self.revbranchcache()
1971 2031 for r in self.changelog:
1972 2032 rbc.branchinfo(r)
1973 2033 rbc.write()
1974 2034
1975 2035 # ensure the working copy parents are in the manifestfulltextcache
1976 2036 for ctx in self['.'].parents():
1977 2037 ctx.manifest() # accessing the manifest is enough
1978 2038
1979 2039 def invalidatecaches(self):
1980 2040
1981 2041 if '_tagscache' in vars(self):
1982 2042 # can't use delattr on proxy
1983 2043 del self.__dict__['_tagscache']
1984 2044
1985 2045 self.unfiltered()._branchcaches.clear()
1986 2046 self.invalidatevolatilesets()
1987 2047 self._sparsesignaturecache.clear()
1988 2048
1989 2049 def invalidatevolatilesets(self):
1990 2050 self.filteredrevcache.clear()
1991 2051 obsolete.clearobscaches(self)
1992 2052
1993 2053 def invalidatedirstate(self):
1994 2054 '''Invalidates the dirstate, causing the next call to dirstate
1995 2055 to check if it was modified since the last time it was read,
1996 2056 rereading it if it has.
1997 2057
1998 2058 This differs from dirstate.invalidate() in that it doesn't always
1999 2059 reread the dirstate. Use dirstate.invalidate() if you want to
2000 2060 explicitly read the dirstate again (i.e. restore it to a previous
2001 2061 known good state).'''
2002 2062 if hasunfilteredcache(self, 'dirstate'):
2003 2063 for k in self.dirstate._filecache:
2004 2064 try:
2005 2065 delattr(self.dirstate, k)
2006 2066 except AttributeError:
2007 2067 pass
2008 2068 delattr(self.unfiltered(), 'dirstate')
2009 2069
2010 2070 def invalidate(self, clearfilecache=False):
2011 2071 '''Invalidates both store and non-store parts other than dirstate
2012 2072
2013 2073 If a transaction is running, invalidation of store is omitted,
2014 2074 because discarding in-memory changes might cause inconsistency
2015 2075 (e.g. an incomplete fncache causes unintentional failure, but a
2016 2076 redundant one doesn't).
2017 2077 '''
2018 2078 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2019 2079 for k in list(self._filecache.keys()):
2020 2080 # dirstate is invalidated separately in invalidatedirstate()
2021 2081 if k == 'dirstate':
2022 2082 continue
2023 2083 if (k == 'changelog' and
2024 2084 self.currenttransaction() and
2025 2085 self.changelog._delayed):
2026 2086 # The changelog object may store unwritten revisions. We don't
2027 2087 # want to lose them.
2028 2088 # TODO: Solve the problem instead of working around it.
2029 2089 continue
2030 2090
2031 2091 if clearfilecache:
2032 2092 del self._filecache[k]
2033 2093 try:
2034 2094 delattr(unfiltered, k)
2035 2095 except AttributeError:
2036 2096 pass
2037 2097 self.invalidatecaches()
2038 2098 if not self.currenttransaction():
2039 2099 # TODO: Changing contents of store outside transaction
2040 2100 # causes inconsistency. We should make in-memory store
2041 2101 # changes detectable, and abort if changed.
2042 2102 self.store.invalidatecaches()
2043 2103
2044 2104 def invalidateall(self):
2045 2105 '''Fully invalidates both store and non-store parts, causing the
2046 2106 subsequent operation to reread any outside changes.'''
2047 2107 # extension should hook this to invalidate its caches
2048 2108 self.invalidate()
2049 2109 self.invalidatedirstate()
2050 2110
2051 2111 @unfilteredmethod
2052 2112 def _refreshfilecachestats(self, tr):
2053 2113 """Reload stats of cached files so that they are flagged as valid"""
2054 2114 for k, ce in self._filecache.items():
2055 2115 k = pycompat.sysstr(k)
2056 2116 if k == r'dirstate' or k not in self.__dict__:
2057 2117 continue
2058 2118 ce.refresh()
2059 2119
2060 2120 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
2061 2121 inheritchecker=None, parentenvvar=None):
2062 2122 parentlock = None
2063 2123 # the contents of parentenvvar are used by the underlying lock to
2064 2124 # determine whether it can be inherited
2065 2125 if parentenvvar is not None:
2066 2126 parentlock = encoding.environ.get(parentenvvar)
2067 2127
2068 2128 timeout = 0
2069 2129 warntimeout = 0
2070 2130 if wait:
2071 2131 timeout = self.ui.configint("ui", "timeout")
2072 2132 warntimeout = self.ui.configint("ui", "timeout.warn")
2073 2133 # internal config: ui.signal-safe-lock
2074 2134 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
2075 2135
2076 2136 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
2077 2137 releasefn=releasefn,
2078 2138 acquirefn=acquirefn, desc=desc,
2079 2139 inheritchecker=inheritchecker,
2080 2140 parentlock=parentlock,
2081 2141 signalsafe=signalsafe)
2082 2142 return l
2083 2143
2084 2144 def _afterlock(self, callback):
2085 2145 """add a callback to be run when the repository is fully unlocked
2086 2146
2087 2147 The callback will be executed when the outermost lock is released
2088 2148 (with wlock being higher level than 'lock')."""
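# Usage sketch (hypothetical callback): defer work until every lock drops:
#   repo._afterlock(lambda: repo.hook('myhook'))
# Note that if no lock is currently held, the callback runs immediately.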
2089 2149 for ref in (self._wlockref, self._lockref):
2090 2150 l = ref and ref()
2091 2151 if l and l.held:
2092 2152 l.postrelease.append(callback)
2093 2153 break
2094 2154 else: # no lock has been found.
2095 2155 callback()
2096 2156
2097 2157 def lock(self, wait=True):
2098 2158 '''Lock the repository store (.hg/store) and return a weak reference
2099 2159 to the lock. Use this before modifying the store (e.g. committing or
2100 2160 stripping). If you are opening a transaction, get a lock as well.
2101 2161
2102 2162 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2103 2163 'wlock' first to avoid a deadlock hazard.'''
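# Ordering sketch (locks and transactions support the 'with' statement;
# 'my-change' is a hypothetical transaction name):
#   with repo.wlock(), repo.lock():
#       with repo.transaction('my-change') as tr:
#           ...  # mutate the store under both locks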
2104 2164 l = self._currentlock(self._lockref)
2105 2165 if l is not None:
2106 2166 l.lock()
2107 2167 return l
2108 2168
2109 2169 l = self._lock(self.svfs, "lock", wait, None,
2110 2170 self.invalidate, _('repository %s') % self.origroot)
2111 2171 self._lockref = weakref.ref(l)
2112 2172 return l
2113 2173
2114 2174 def _wlockchecktransaction(self):
2115 2175 if self.currenttransaction() is not None:
2116 2176 raise error.LockInheritanceContractViolation(
2117 2177 'wlock cannot be inherited in the middle of a transaction')
2118 2178
2119 2179 def wlock(self, wait=True):
2120 2180 '''Lock the non-store parts of the repository (everything under
2121 2181 .hg except .hg/store) and return a weak reference to the lock.
2122 2182
2123 2183 Use this before modifying files in .hg.
2124 2184
2125 2185 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2126 2186 'wlock' first to avoid a deadlock hazard.'''
2127 2187 l = self._wlockref and self._wlockref()
2128 2188 if l is not None and l.held:
2129 2189 l.lock()
2130 2190 return l
2131 2191
2132 2192 # We do not need to check for non-waiting lock acquisition. Such
2133 2193 # acquisitions would not cause a deadlock, as they would just fail.
2134 2194 if wait and (self.ui.configbool('devel', 'all-warnings')
2135 2195 or self.ui.configbool('devel', 'check-locks')):
2136 2196 if self._currentlock(self._lockref) is not None:
2137 2197 self.ui.develwarn('"wlock" acquired after "lock"')
2138 2198
2139 2199 def unlock():
2140 2200 if self.dirstate.pendingparentchange():
2141 2201 self.dirstate.invalidate()
2142 2202 else:
2143 2203 self.dirstate.write(None)
2144 2204
2145 2205 self._filecache['dirstate'].refresh()
2146 2206
2147 2207 l = self._lock(self.vfs, "wlock", wait, unlock,
2148 2208 self.invalidatedirstate, _('working directory of %s') %
2149 2209 self.origroot,
2150 2210 inheritchecker=self._wlockchecktransaction,
2151 2211 parentenvvar='HG_WLOCK_LOCKER')
2152 2212 self._wlockref = weakref.ref(l)
2153 2213 return l
2154 2214
2155 2215 def _currentlock(self, lockref):
2156 2216 """Returns the lock if it's held, or None if it's not."""
2157 2217 if lockref is None:
2158 2218 return None
2159 2219 l = lockref()
2160 2220 if l is None or not l.held:
2161 2221 return None
2162 2222 return l
2163 2223
2164 2224 def currentwlock(self):
2165 2225 """Returns the wlock if it's held, or None if it's not."""
2166 2226 return self._currentlock(self._wlockref)
2167 2227
2168 2228 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
2169 2229 """
2170 2230 commit an individual file as part of a larger transaction
2171 2231 """
2172 2232
2173 2233 fname = fctx.path()
2174 2234 fparent1 = manifest1.get(fname, nullid)
2175 2235 fparent2 = manifest2.get(fname, nullid)
2176 2236 if isinstance(fctx, context.filectx):
2177 2237 node = fctx.filenode()
2178 2238 if node in [fparent1, fparent2]:
2179 2239 self.ui.debug('reusing %s filelog entry\n' % fname)
2180 2240 if manifest1.flags(fname) != fctx.flags():
2181 2241 changelist.append(fname)
2182 2242 return node
2183 2243
2184 2244 flog = self.file(fname)
2185 2245 meta = {}
2186 2246 copy = fctx.renamed()
2187 2247 if copy and copy[0] != fname:
2188 2248 # Mark the new revision of this file as a copy of another
2189 2249 # file. This copy data will effectively act as a parent
2190 2250 # of this new revision. If this is a merge, the first
2191 2251 # parent will be the nullid (meaning "look up the copy data")
2192 2252 # and the second one will be the other parent. For example:
2193 2253 #
2194 2254 # 0 --- 1 --- 3 rev1 changes file foo
2195 2255 # \ / rev2 renames foo to bar and changes it
2196 2256 # \- 2 -/ rev3 should have bar with all changes and
2197 2257 # should record that bar descends from
2198 2258 # bar in rev2 and foo in rev1
2199 2259 #
2200 2260 # this allows this merge to succeed:
2201 2261 #
2202 2262 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
2203 2263 # \ / merging rev3 and rev4 should use bar@rev2
2204 2264 # \- 2 --- 4 as the merge base
2205 2265 #
2206 2266
2207 2267 cfname = copy[0]
2208 2268 crev = manifest1.get(cfname)
2209 2269 newfparent = fparent2
2210 2270
2211 2271 if manifest2: # branch merge
2212 2272 if fparent2 == nullid or crev is None: # copied on remote side
2213 2273 if cfname in manifest2:
2214 2274 crev = manifest2[cfname]
2215 2275 newfparent = fparent1
2216 2276
2217 2277 # Here, we used to search backwards through history to try to find
2218 2278 # where the file copy came from if the source of a copy was not in
2219 2279 # the parent directory. However, this doesn't actually make sense to
2220 2280 # do (what does a copy from something not in your working copy even
2221 2281 # mean?) and it causes bugs (e.g., issue4476). Instead, we will warn
2222 2282 # the user that copy information was dropped, so if they didn't
2223 2283 # expect this outcome it can be fixed, but this is the correct
2224 2284 # behavior in this circumstance.
2225 2285
2226 2286 if crev:
2227 2287 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
2228 2288 meta["copy"] = cfname
2229 2289 meta["copyrev"] = hex(crev)
2230 2290 fparent1, fparent2 = nullid, newfparent
2231 2291 else:
2232 2292 self.ui.warn(_("warning: can't find ancestor for '%s' "
2233 2293 "copied from '%s'!\n") % (fname, cfname))
2234 2294
2235 2295 elif fparent1 == nullid:
2236 2296 fparent1, fparent2 = fparent2, nullid
2237 2297 elif fparent2 != nullid:
2238 2298 # is one parent an ancestor of the other?
2239 2299 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2240 2300 if fparent1 in fparentancestors:
2241 2301 fparent1, fparent2 = fparent2, nullid
2242 2302 elif fparent2 in fparentancestors:
2243 2303 fparent2 = nullid
2244 2304
2245 2305 # is the file changed?
2246 2306 text = fctx.data()
2247 2307 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
2248 2308 changelist.append(fname)
2249 2309 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2250 2310 # are just the flags changed during merge?
2251 2311 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2252 2312 changelist.append(fname)
2253 2313
2254 2314 return fparent1
2255 2315
2256 2316 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
2257 2317 """check for commit arguments that aren't committable"""
2258 2318 if match.isexact() or match.prefix():
2259 2319 matched = set(status.modified + status.added + status.removed)
2260 2320
2261 2321 for f in match.files():
2262 2322 f = self.dirstate.normalize(f)
2263 2323 if f == '.' or f in matched or f in wctx.substate:
2264 2324 continue
2265 2325 if f in status.deleted:
2266 2326 fail(f, _('file not found!'))
2267 2327 if f in vdirs: # visited directory
2268 2328 d = f + '/'
2269 2329 for mf in matched:
2270 2330 if mf.startswith(d):
2271 2331 break
2272 2332 else:
2273 2333 fail(f, _("no match under directory!"))
2274 2334 elif f not in self.dirstate:
2275 2335 fail(f, _("file not tracked!"))
2276 2336
2277 2337 @unfilteredmethod
2278 2338 def commit(self, text="", user=None, date=None, match=None, force=False,
2279 2339 editor=False, extra=None):
2280 2340 """Add a new revision to current repository.
2281 2341
2282 2342 Revision information is gathered from the working directory,
2283 2343 match can be used to filter the committed files. If editor is
2284 2344 supplied, it is called to get a commit message.
2285 2345 """
2286 2346 if extra is None:
2287 2347 extra = {}
2288 2348
2289 2349 def fail(f, msg):
2290 2350 raise error.Abort('%s: %s' % (f, msg))
2291 2351
2292 2352 if not match:
2293 2353 match = matchmod.always(self.root, '')
2294 2354
2295 2355 if not force:
2296 2356 vdirs = []
2297 2357 match.explicitdir = vdirs.append
2298 2358 match.bad = fail
2299 2359
2300 2360 wlock = lock = tr = None
2301 2361 try:
2302 2362 wlock = self.wlock()
2303 2363 lock = self.lock() # for recent changelog (see issue4368)
2304 2364
2305 2365 wctx = self[None]
2306 2366 merge = len(wctx.parents()) > 1
2307 2367
2308 2368 if not force and merge and not match.always():
2309 2369 raise error.Abort(_('cannot partially commit a merge '
2310 2370 '(do not specify files or patterns)'))
2311 2371
2312 2372 status = self.status(match=match, clean=force)
2313 2373 if force:
2314 2374 status.modified.extend(status.clean) # mq may commit clean files
2315 2375
2316 2376 # check subrepos
2317 2377 subs, commitsubs, newstate = subrepoutil.precommit(
2318 2378 self.ui, wctx, status, match, force=force)
2319 2379
2320 2380 # make sure all explicit patterns are matched
2321 2381 if not force:
2322 2382 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
2323 2383
2324 2384 cctx = context.workingcommitctx(self, status,
2325 2385 text, user, date, extra)
2326 2386
2327 2387 # internal config: ui.allowemptycommit
2328 2388 allowemptycommit = (wctx.branch() != wctx.p1().branch()
2329 2389 or extra.get('close') or merge or cctx.files()
2330 2390 or self.ui.configbool('ui', 'allowemptycommit'))
2331 2391 if not allowemptycommit:
2332 2392 return None
2333 2393
2334 2394 if merge and cctx.deleted():
2335 2395 raise error.Abort(_("cannot commit merge with missing files"))
2336 2396
2337 2397 ms = mergemod.mergestate.read(self)
2338 2398 mergeutil.checkunresolved(ms)
2339 2399
2340 2400 if editor:
2341 2401 cctx._text = editor(self, cctx, subs)
2342 2402 edited = (text != cctx._text)
2343 2403
2344 2404 # Save commit message in case this transaction gets rolled back
2345 2405 # (e.g. by a pretxncommit hook). Leave the content alone on
2346 2406 # the assumption that the user will use the same editor again.
2347 2407 msgfn = self.savecommitmessage(cctx._text)
2348 2408
2349 2409 # commit subs and write new state
2350 2410 if subs:
2351 2411 for s in sorted(commitsubs):
2352 2412 sub = wctx.sub(s)
2353 2413 self.ui.status(_('committing subrepository %s\n') %
2354 2414 subrepoutil.subrelpath(sub))
2355 2415 sr = sub.commit(cctx._text, user, date)
2356 2416 newstate[s] = (newstate[s][0], sr)
2357 2417 subrepoutil.writestate(self, newstate)
2358 2418
2359 2419 p1, p2 = self.dirstate.parents()
2360 2420 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2361 2421 try:
2362 2422 self.hook("precommit", throw=True, parent1=hookp1,
2363 2423 parent2=hookp2)
2364 2424 tr = self.transaction('commit')
2365 2425 ret = self.commitctx(cctx, True)
2366 2426 except: # re-raises
2367 2427 if edited:
2368 2428 self.ui.write(
2369 2429 _('note: commit message saved in %s\n') % msgfn)
2370 2430 raise
2371 2431 # update bookmarks, dirstate and mergestate
2372 2432 bookmarks.update(self, [p1, p2], ret)
2373 2433 cctx.markcommitted(ret)
2374 2434 ms.reset()
2375 2435 tr.close()
2376 2436
2377 2437 finally:
2378 2438 lockmod.release(tr, lock, wlock)
2379 2439
2380 2440 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2381 2441 # hack for commands that use a temporary commit (e.g. histedit):
2382 2442 # the temporary commit may have been stripped before the hook runs
2383 2443 if self.changelog.hasnode(ret):
2384 2444 self.hook("commit", node=node, parent1=parent1,
2385 2445 parent2=parent2)
2386 2446 self._afterlock(commithook)
2387 2447 return ret
2388 2448
2389 2449 @unfilteredmethod
2390 2450 def commitctx(self, ctx, error=False):
2391 2451 """Add a new revision to current repository.
2392 2452 Revision information is passed via the context argument.
2393 2453
2394 2454 ctx.files() should list all files involved in this commit, i.e.
2395 2455 modified/added/removed files. On merge, it may be wider than the
2396 2456 ctx.files() to be committed, since any file nodes derived directly
2397 2457 from p1 or p2 are excluded from the committed ctx.files().
2398 2458 """
2399 2459
2400 2460 tr = None
2401 2461 p1, p2 = ctx.p1(), ctx.p2()
2402 2462 user = ctx.user()
2403 2463
2404 2464 lock = self.lock()
2405 2465 try:
2406 2466 tr = self.transaction("commit")
2407 2467 trp = weakref.proxy(tr)
2408 2468
2409 2469 if ctx.manifestnode():
2410 2470 # reuse an existing manifest revision
2411 2471 self.ui.debug('reusing known manifest\n')
2412 2472 mn = ctx.manifestnode()
2413 2473 files = ctx.files()
2414 2474 elif ctx.files():
2415 2475 m1ctx = p1.manifestctx()
2416 2476 m2ctx = p2.manifestctx()
2417 2477 mctx = m1ctx.copy()
2418 2478
2419 2479 m = mctx.read()
2420 2480 m1 = m1ctx.read()
2421 2481 m2 = m2ctx.read()
2422 2482
2423 2483 # check in files
2424 2484 added = []
2425 2485 changed = []
2426 2486 removed = list(ctx.removed())
2427 2487 linkrev = len(self)
2428 2488 self.ui.note(_("committing files:\n"))
2429 2489 for f in sorted(ctx.modified() + ctx.added()):
2430 2490 self.ui.note(f + "\n")
2431 2491 try:
2432 2492 fctx = ctx[f]
2433 2493 if fctx is None:
2434 2494 removed.append(f)
2435 2495 else:
2436 2496 added.append(f)
2437 2497 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2438 2498 trp, changed)
2439 2499 m.setflag(f, fctx.flags())
2440 2500 except OSError as inst:
2441 2501 self.ui.warn(_("trouble committing %s!\n") % f)
2442 2502 raise
2443 2503 except IOError as inst:
2444 2504 errcode = getattr(inst, 'errno', errno.ENOENT)
2445 2505 if error or errcode and errcode != errno.ENOENT:
2446 2506 self.ui.warn(_("trouble committing %s!\n") % f)
2447 2507 raise
2448 2508
2449 2509 # update manifest
2450 2510 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2451 2511 drop = [f for f in removed if f in m]
2452 2512 for f in drop:
2453 2513 del m[f]
2454 2514 files = changed + removed
2455 2515 md = None
2456 2516 if not files:
2457 2517 # if no "files" actually changed in terms of the changelog,
2458 2518 # try hard to detect an unmodified manifest entry so that the
2459 2519 # exact same commit can be reproduced later on convert.
2460 2520 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2461 2521 if not files and md:
2462 2522 self.ui.debug('not reusing manifest (no file change in '
2463 2523 'changelog, but manifest differs)\n')
2464 2524 if files or md:
2465 2525 self.ui.note(_("committing manifest\n"))
2466 2526 # we're using narrowmatch here since it's already applied at
2467 2527 # other stages (such as dirstate.walk), so we're already
2468 2528 # ignoring things outside of narrowspec in most cases. The
2469 2529 # one case where we might have files outside the narrowspec
2470 2530 # at this point is merges, and we already error out in the
2471 2531 # case where the merge has files outside of the narrowspec,
2472 2532 # so this is safe.
2473 2533 mn = mctx.write(trp, linkrev,
2474 2534 p1.manifestnode(), p2.manifestnode(),
2475 2535 added, drop, match=self.narrowmatch())
2476 2536 else:
2477 2537 self.ui.debug('reusing manifest from p1 (listed files '
2478 2538 'actually unchanged)\n')
2479 2539 mn = p1.manifestnode()
2480 2540 else:
2481 2541 self.ui.debug('reusing manifest from p1 (no file change)\n')
2482 2542 mn = p1.manifestnode()
2483 2543 files = []
2484 2544
2485 2545 # update changelog
2486 2546 self.ui.note(_("committing changelog\n"))
2487 2547 self.changelog.delayupdate(tr)
2488 2548 n = self.changelog.add(mn, files, ctx.description(),
2489 2549 trp, p1.node(), p2.node(),
2490 2550 user, ctx.date(), ctx.extra().copy())
2491 2551 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2492 2552 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2493 2553 parent2=xp2)
2494 2554 # set the new commit to its proper phase
2495 2555 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2496 2556 if targetphase:
2497 2557 # retracting the boundary does not alter parent changesets.
2498 2558 # if a parent has a higher phase, the resulting phase will
2499 2559 # be compliant anyway
2500 2560 #
2501 2561 # if minimal phase was 0 we don't need to retract anything
2502 2562 phases.registernew(self, tr, targetphase, [n])
2503 2563 tr.close()
2504 2564 return n
2505 2565 finally:
2506 2566 if tr:
2507 2567 tr.release()
2508 2568 lock.release()
2509 2569
2510 2570 @unfilteredmethod
2511 2571 def destroying(self):
2512 2572 '''Inform the repository that nodes are about to be destroyed.
2513 2573 Intended for use by strip and rollback, so there's a common
2514 2574 place for anything that has to be done before destroying history.
2515 2575
2516 2576 This is mostly useful for saving state that is in memory and waiting
2517 2577 to be flushed when the current lock is released. Because a call to
2518 2578 destroyed is imminent, the repo will be invalidated, causing those
2519 2579 changes to either stay in memory (waiting for the next unlock) or vanish
2520 2580 completely.
2521 2581 '''
2522 2582 # When using the same lock to commit and strip, the phasecache is left
2523 2583 # dirty after committing. Then when we strip, the repo is invalidated,
2524 2584 # causing those changes to disappear.
2525 2585 if '_phasecache' in vars(self):
2526 2586 self._phasecache.write()
2527 2587
2528 2588 @unfilteredmethod
2529 2589 def destroyed(self):
2530 2590 '''Inform the repository that nodes have been destroyed.
2531 2591 Intended for use by strip and rollback, so there's a common
2532 2592 place for anything that has to be done after destroying history.
2533 2593 '''
2534 2594 # When one tries to:
2535 2595 # 1) destroy nodes thus calling this method (e.g. strip)
2536 2596 # 2) use phasecache somewhere (e.g. commit)
2537 2597 #
2538 2598 # then 2) will fail because the phasecache contains nodes that were
2539 2599 # removed. We can either remove phasecache from the filecache,
2540 2600 # causing it to reload next time it is accessed, or simply filter
2541 2601 # the removed nodes now and write the updated cache.
2542 2602 self._phasecache.filterunknown(self)
2543 2603 self._phasecache.write()
2544 2604
2545 2605 # refresh all repository caches
2546 2606 self.updatecaches()
2547 2607
2548 2608 # Ensure the persistent tag cache is updated. Doing it now
2549 2609 # means that the tag cache only has to worry about destroyed
2550 2610 # heads immediately after a strip/rollback. That in turn
2551 2611 # guarantees that "cachetip == currenttip" (comparing both rev
2552 2612 # and node) always means no nodes have been added or destroyed.
2553 2613
2554 2614 # XXX this is suboptimal when qrefresh'ing: we strip the current
2555 2615 # head, refresh the tag cache, then immediately add a new head.
2556 2616 # But I think doing it this way is necessary for the "instant
2557 2617 # tag cache retrieval" case to work.
2558 2618 self.invalidate()
2559 2619
2560 2620 def status(self, node1='.', node2=None, match=None,
2561 2621 ignored=False, clean=False, unknown=False,
2562 2622 listsubrepos=False):
2563 2623 '''a convenience method that calls node1.status(node2)'''
2564 2624 return self[node1].status(node2, match, ignored, clean, unknown,
2565 2625 listsubrepos)
2566 2626
2567 2627 def addpostdsstatus(self, ps):
2568 2628 """Add a callback to run within the wlock, at the point at which status
2569 2629 fixups happen.
2570 2630
2571 2631 On status completion, callback(wctx, status) will be called with the
2572 2632 wlock held, unless the dirstate has changed from underneath or the wlock
2573 2633 couldn't be grabbed.
2574 2634
2575 2635 Callbacks should not capture and use a cached copy of the dirstate --
2576 2636 it might change in the meanwhile. Instead, they should access the
2577 2637 dirstate via wctx.repo().dirstate.
2578 2638
2579 2639 This list is emptied out after each status run -- extensions should
2580 2640 make sure they add to this list each time dirstate.status is called.
2581 2641 Extensions should also make sure they don't call this for statuses
2582 2642 that don't involve the dirstate.
2583 2643 """
2584 2644
2585 2645 # The list is located here for uniqueness reasons -- it is actually
2586 2646 # managed by the workingctx, but that isn't unique per-repo.
2587 2647 self._postdsstatus.append(ps)
2588 2648
2589 2649 def postdsstatus(self):
2590 2650 """Used by workingctx to get the list of post-dirstate-status hooks."""
2591 2651 return self._postdsstatus
2592 2652
2593 2653 def clearpostdsstatus(self):
2594 2654 """Used by workingctx to clear post-dirstate-status hooks."""
2595 2655 del self._postdsstatus[:]
2596 2656
2597 2657 def heads(self, start=None):
2598 2658 if start is None:
2599 2659 cl = self.changelog
2600 2660 headrevs = reversed(cl.headrevs())
2601 2661 return [cl.node(rev) for rev in headrevs]
2602 2662
2603 2663 heads = self.changelog.heads(start)
2604 2664 # sort the output in rev descending order
2605 2665 return sorted(heads, key=self.changelog.rev, reverse=True)
2606 2666
2607 2667 def branchheads(self, branch=None, start=None, closed=False):
2608 2668 '''return a (possibly filtered) list of heads for the given branch
2609 2669
2610 2670 Heads are returned in topological order, from newest to oldest.
2611 2671 If branch is None, use the dirstate branch.
2612 2672 If start is not None, return only heads reachable from start.
2613 2673 If closed is True, return heads that are marked as closed as well.
2614 2674 '''
2615 2675 if branch is None:
2616 2676 branch = self[None].branch()
2617 2677 branches = self.branchmap()
2618 2678 if branch not in branches:
2619 2679 return []
2620 2680 # the cache returns heads ordered lowest to highest
2621 2681 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2622 2682 if start is not None:
2623 2683 # filter out the heads that cannot be reached from startrev
2624 2684 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2625 2685 bheads = [h for h in bheads if h in fbheads]
2626 2686 return bheads
2627 2687
2628 2688 def branches(self, nodes):
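# For each node, follow first parents down the run of single-parent
# changesets until a merge or the root is reached, then report the
# starting node, the stopping node, and the stopping node's parents.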
2629 2689 if not nodes:
2630 2690 nodes = [self.changelog.tip()]
2631 2691 b = []
2632 2692 for n in nodes:
2633 2693 t = n
2634 2694 while True:
2635 2695 p = self.changelog.parents(n)
2636 2696 if p[1] != nullid or p[0] == nullid:
2637 2697 b.append((t, n, p[0], p[1]))
2638 2698 break
2639 2699 n = p[0]
2640 2700 return b
2641 2701
2642 2702 def between(self, pairs):
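# For each (top, bottom) pair, walk first parents from top toward bottom,
# sampling nodes at exponentially growing distances (1, 2, 4, ...); this
# thinning was historically used by the 'between' wire protocol command.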
2643 2703 r = []
2644 2704
2645 2705 for top, bottom in pairs:
2646 2706 n, l, i = top, [], 0
2647 2707 f = 1
2648 2708
2649 2709 while n != bottom and n != nullid:
2650 2710 p = self.changelog.parents(n)[0]
2651 2711 if i == f:
2652 2712 l.append(n)
2653 2713 f = f * 2
2654 2714 n = p
2655 2715 i += 1
2656 2716
2657 2717 r.append(l)
2658 2718
2659 2719 return r
2660 2720
2661 2721 def checkpush(self, pushop):
2662 2722 """Extensions can override this function if additional checks have
2663 2723 to be performed before pushing, or call it if they override push
2664 2724 command.
2665 2725 """
2666 2726
2667 2727 @unfilteredpropertycache
2668 2728 def prepushoutgoinghooks(self):
2669 2729 """Return util.hooks consists of a pushop with repo, remote, outgoing
2670 2730 methods, which are called before pushing changesets.
2671 2731 """
2672 2732 return util.hooks()
2673 2733
2674 2734 def pushkey(self, namespace, key, old, new):
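# pushkey namespaces include, for example, 'bookmarks' and 'phases'; the
# 'prepushkey' hook below can veto the update before pushkey.push runs.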
2675 2735 try:
2676 2736 tr = self.currenttransaction()
2677 2737 hookargs = {}
2678 2738 if tr is not None:
2679 2739 hookargs.update(tr.hookargs)
2680 2740 hookargs = pycompat.strkwargs(hookargs)
2681 2741 hookargs[r'namespace'] = namespace
2682 2742 hookargs[r'key'] = key
2683 2743 hookargs[r'old'] = old
2684 2744 hookargs[r'new'] = new
2685 2745 self.hook('prepushkey', throw=True, **hookargs)
2686 2746 except error.HookAbort as exc:
2687 2747 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2688 2748 if exc.hint:
2689 2749 self.ui.write_err(_("(%s)\n") % exc.hint)
2690 2750 return False
2691 2751 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2692 2752 ret = pushkey.push(self, namespace, key, old, new)
2693 2753 def runhook():
2694 2754 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2695 2755 ret=ret)
2696 2756 self._afterlock(runhook)
2697 2757 return ret
2698 2758
2699 2759 def listkeys(self, namespace):
2700 2760 self.hook('prelistkeys', throw=True, namespace=namespace)
2701 2761 self.ui.debug('listing keys for "%s"\n' % namespace)
2702 2762 values = pushkey.list(self, namespace)
2703 2763 self.hook('listkeys', namespace=namespace, values=values)
2704 2764 return values
2705 2765
2706 2766 def debugwireargs(self, one, two, three=None, four=None, five=None):
2707 2767 '''used to test argument passing over the wire'''
2708 2768 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2709 2769 pycompat.bytestr(four),
2710 2770 pycompat.bytestr(five))
2711 2771
2712 2772 def savecommitmessage(self, text):
2713 2773 fp = self.vfs('last-message.txt', 'wb')
2714 2774 try:
2715 2775 fp.write(text)
2716 2776 finally:
2717 2777 fp.close()
2718 2778 return self.pathto(fp.name[len(self.root) + 1:])
2719 2779
2720 2780 # used to avoid circular references so destructors work
2721 2781 def aftertrans(files):
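# 'files' holds (vfs, src, dest) triples, typically renaming 'journal.*'
# files to their 'undo.*' counterparts once the transaction finishes.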
2722 2782 renamefiles = [tuple(t) for t in files]
2723 2783 def a():
2724 2784 for vfs, src, dest in renamefiles:
2725 2785 # if src and dest refer to the same file, vfs.rename is a no-op,
2726 2786 # leaving both src and dest on disk. delete dest to make sure
2727 2787 # the rename couldn't be such a no-op.
2728 2788 vfs.tryunlink(dest)
2729 2789 try:
2730 2790 vfs.rename(src, dest)
2731 2791 except OSError: # journal file does not yet exist
2732 2792 pass
2733 2793 return a
2734 2794
2735 2795 def undoname(fn):
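# e.g. '.hg/journal.dirstate' becomes '.hg/undo.dirstate'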
2736 2796 base, name = os.path.split(fn)
2737 2797 assert name.startswith('journal')
2738 2798 return os.path.join(base, name.replace('journal', 'undo', 1))
2739 2799
2740 2800 def instance(ui, path, create, intents=None, createopts=None):
2741 2801 localpath = util.urllocalpath(path)
2742 2802 if create:
2743 2803 createrepository(ui, localpath, createopts=createopts)
2744 2804
2745 2805 return makelocalrepository(ui, localpath, intents=intents)
2746 2806
2747 2807 def islocal(path):
2748 2808 return True
2749 2809
2750 2810 def newreporequirements(ui, createopts=None):
2751 2811 """Determine the set of requirements for a new local repository.
2752 2812
2753 2813 Extensions can wrap this function to specify custom requirements for
2754 2814 new repositories.
2755 2815 """
2756 2816 createopts = createopts or {}
2757 2817
2758 2818 # If the repo is being created from a shared repository, we copy
2759 2819 # its requirements.
2760 2820 if 'sharedrepo' in createopts:
2761 2821 requirements = set(createopts['sharedrepo'].requirements)
2762 2822 if createopts.get('sharedrelative'):
2763 2823 requirements.add('relshared')
2764 2824 else:
2765 2825 requirements.add('shared')
2766 2826
2767 2827 return requirements
2768 2828
2769 2829 requirements = {'revlogv1'}
2770 2830 if ui.configbool('format', 'usestore'):
2771 2831 requirements.add('store')
2772 2832 if ui.configbool('format', 'usefncache'):
2773 2833 requirements.add('fncache')
2774 2834 if ui.configbool('format', 'dotencode'):
2775 2835 requirements.add('dotencode')
2776 2836
2777 2837 compengine = ui.config('experimental', 'format.compression')
2778 2838 if compengine not in util.compengines:
2779 2839 raise error.Abort(_('compression engine %s defined by '
2780 2840 'experimental.format.compression not available') %
2781 2841 compengine,
2782 2842 hint=_('run "hg debuginstall" to list available '
2783 2843 'compression engines'))
2784 2844
2785 2845 # zlib is the historical default and doesn't need an explicit requirement.
2786 2846 if compengine != 'zlib':
2787 2847 requirements.add('exp-compression-%s' % compengine)
2788 2848
2789 2849 if scmutil.gdinitconfig(ui):
2790 2850 requirements.add('generaldelta')
2791 2851 if ui.configbool('experimental', 'treemanifest'):
2792 2852 requirements.add('treemanifest')
2793 2853 # experimental config: format.sparse-revlog
2794 2854 if ui.configbool('format', 'sparse-revlog'):
2795 2855 requirements.add(SPARSEREVLOG_REQUIREMENT)
2796 2856
2797 2857 revlogv2 = ui.config('experimental', 'revlogv2')
2798 2858 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2799 2859 requirements.remove('revlogv1')
2800 2860 # generaldelta is implied by revlogv2.
2801 2861 requirements.discard('generaldelta')
2802 2862 requirements.add(REVLOGV2_REQUIREMENT)
2803 2863 # experimental config: format.internal-phase
2804 2864 if ui.configbool('format', 'internal-phase'):
2805 2865 requirements.add('internal-phase')
2806 2866
2807 2867 if createopts.get('narrowfiles'):
2808 2868 requirements.add(repository.NARROW_REQUIREMENT)
2809 2869
2810 2870 return requirements
2811 2871
2812 2872 def filterknowncreateopts(ui, createopts):
2813 2873 """Filters a dict of repo creation options against options that are known.
2814 2874
2815 2875 Receives a dict of repo creation options and returns a dict of those
2816 2876 options that we don't know how to handle.
2817 2877
2818 2878 This function is called as part of repository creation. If the
2819 2879 returned dict contains any items, repository creation will not
2820 2880 be allowed, as it means there was a request to create a repository
2821 2881 with options not recognized by loaded code.
2822 2882
2823 2883 Extensions can wrap this function to filter out creation options
2824 2884 they know how to handle.
2825 2885 """
2826 2886 known = {
2827 2887 'narrowfiles',
2828 2888 'sharedrepo',
2829 2889 'sharedrelative',
2830 2890 'shareditems',
2831 2891 }
2832 2892
2833 2893 return {k: v for k, v in createopts.items() if k not in known}
2834 2894
2835 2895 def createrepository(ui, path, createopts=None):
2836 2896 """Create a new repository in a vfs.
2837 2897
2838 2898 ``path`` path to the new repo's working directory.
2839 2899 ``createopts`` options for the new repository.
2840 2900
2841 2901 The following keys for ``createopts`` are recognized:
2842 2902
2843 2903 narrowfiles
2844 2904 Set up repository to support narrow file storage.
2845 2905 sharedrepo
2846 2906 Repository object from which storage should be shared.
2847 2907 sharedrelative
2848 2908 Boolean indicating if the path to the shared repo should be
2849 2909 stored as relative. By default, the pointer to the "parent" repo
2850 2910 is stored as an absolute path.
2851 2911 shareditems
2852 2912 Set of items to share to the new repository (in addition to storage).
2853 2913 """
2854 2914 createopts = createopts or {}
2855 2915
2856 2916 unknownopts = filterknowncreateopts(ui, createopts)
2857 2917
2858 2918 if not isinstance(unknownopts, dict):
2859 2919 raise error.ProgrammingError('filterknowncreateopts() did not return '
2860 2920 'a dict')
2861 2921
2862 2922 if unknownopts:
2863 2923 raise error.Abort(_('unable to create repository because of unknown '
2864 2924 'creation option: %s') %
2865 2925 ', '.join(sorted(unknownopts)),
2866 2926 hint=_('is a required extension not loaded?'))
2867 2927
2868 2928 requirements = newreporequirements(ui, createopts=createopts)
2869 2929
2870 2930 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
2871 2931
2872 2932 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
2873 2933 if hgvfs.exists():
2874 2934 raise error.RepoError(_('repository %s already exists') % path)
2875 2935
2876 2936 if 'sharedrepo' in createopts:
2877 2937 sharedpath = createopts['sharedrepo'].sharedpath
2878 2938
2879 2939 if createopts.get('sharedrelative'):
2880 2940 try:
2881 2941 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
2882 2942 except (IOError, ValueError) as e:
2883 2943 # ValueError is raised on Windows if the drive letters differ
2884 2944 # on each path.
2885 2945 raise error.Abort(_('cannot calculate relative path'),
2886 2946 hint=stringutil.forcebytestr(e))
2887 2947
2888 2948 if not wdirvfs.exists():
2889 2949 wdirvfs.makedirs()
2890 2950
2891 2951 hgvfs.makedir(notindexed=True)
2892 2952
2893 2953 if b'store' in requirements and 'sharedrepo' not in createopts:
2894 2954 hgvfs.mkdir(b'store')
2895 2955
2896 2956 # We create an invalid changelog outside the store so very old
2897 2957 # Mercurial versions (which didn't know about the requirements
2898 2958 # file) encounter an error on reading the changelog. This
2899 2959 # effectively locks out old clients and prevents them from
2900 2960 # mucking with a repo in an unknown format.
2901 2961 #
2902 2962 # The revlog header has version 2, which won't be recognized by
2903 2963 # such old clients.
2904 2964 hgvfs.append(b'00changelog.i',
2905 2965 b'\0\0\0\2 dummy changelog to prevent using the old repo '
2906 2966 b'layout')
2907 2967
2908 2968 scmutil.writerequires(hgvfs, requirements)
2909 2969
2910 2970 # Write out file telling readers where to find the shared store.
2911 2971 if 'sharedrepo' in createopts:
2912 2972 hgvfs.write(b'sharedpath', sharedpath)
2913 2973
2914 2974 if createopts.get('shareditems'):
2915 2975 shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
2916 2976 hgvfs.write(b'shared', shared)
2917 2977
2918 2978 def poisonrepository(repo):
2919 2979 """Poison a repository instance so it can no longer be used."""
2920 2980 # Perform any cleanup on the instance.
2921 2981 repo.close()
2922 2982
2923 2983 # Our strategy is to replace the type of the object with one that
2924 2984 # has all attribute lookups result in error.
2925 2985 #
2926 2986 # But we have to allow the close() method because some constructors
2927 2987 # of repos call close() on repo references.
2928 2988 class poisonedrepository(object):
2929 2989 def __getattribute__(self, item):
2930 2990 if item == r'close':
2931 2991 return object.__getattribute__(self, item)
2932 2992
2933 2993 raise error.ProgrammingError('repo instances should not be used '
2934 2994 'after unshare')
2935 2995
2936 2996 def close(self):
2937 2997 pass
2938 2998
2939 2999 # We may have a repoview, which intercepts __setattr__. So be sure
2940 3000 # we operate at the lowest level possible.
2941 3001 object.__setattr__(repo, r'__class__', poisonedrepository)
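# After poisoning, any attribute access except close() raises
# ProgrammingError; e.g. touching repo.root on an unshared repo now fails.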