# Changeset metadata (from repository browser): "merge with stable"
# Augie Fackler — r36708:b529e640, merge into default.
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import re
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 addednodeid,
19 19 bin,
20 20 hex,
21 21 modifiednodeid,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 wdirid,
26 26 wdirnodes,
27 27 wdirrev,
28 28 )
29 29 from .thirdparty import (
30 30 attr,
31 31 )
32 32 from . import (
33 33 encoding,
34 34 error,
35 35 fileset,
36 36 match as matchmod,
37 37 mdiff,
38 38 obsolete as obsmod,
39 39 obsutil,
40 40 patch,
41 41 pathutil,
42 42 phases,
43 43 pycompat,
44 44 repoview,
45 45 revlog,
46 46 scmutil,
47 47 sparse,
48 48 subrepo,
49 49 subrepoutil,
50 50 util,
51 51 )
52 52 from .utils import dateutil
53 53
# Shorthand for Mercurial's lazily-computed-attribute decorator, used on
# expensive context attributes throughout this module.
propertycache = util.propertycache

# Predicate: does a byte string contain any byte outside printable ASCII?
# Used by changectx.__init__ to decide whether a 20-byte lookup value is a
# binary node id (safe to hex-encode for the error message).
nonascii = re.compile(r'[^\x21-\x7f]').search
57 57
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        # Passing an existing context through unchanged lets callers write
        # repo[ctx] without constructing a new object.
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __bytes__(self):
        # short (12-hex-digit) node id
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # contexts compare equal only when of the same concrete type and
        # bound to the same revision; anything without a _rev is unequal
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # "path in ctx" is membership in the manifest
        return key in self._manifest

    def __getitem__(self, key):
        # ctx[path] yields a file context for path
        return self.filectx(key)

    def __iter__(self):
        # iterating a context iterates the paths in its manifest
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                # same in both manifests (only reported when listclean)
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirnodes:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # subrepo state for this context, as parsed by subrepoutil.state
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        # second field of the substate entry for subpath
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        # human-readable phase name ('public', 'draft', ...)
        return phases.phasenames[self.phase()]
    def mutable(self):
        # any phase above public may still be rewritten
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def orphan(self):
        """True if the changeset is not obsolete but it's ancestor are"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def phasedivergent(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def contentdivergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def isunstable(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        # a missing second parent is represented by the null changeset
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        # Return (filenode, flags) for ``path`` in this changeset, raising
        # ManifestLookupError when absent.  Prefers whatever manifest data is
        # already cached (full manifest, then manifest delta) before asking
        # the manifest log to find the entry.
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        # missing files have no flags
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        # build a matcher rooted at the repo, bound to this context
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        # default to diffing against the first parent
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, pycompat.byteskwargs(opts))
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
388 388
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    # Non-'visible' filters get a generic "not in subset" message.
    if not repo.filtername.startswith('visible'):
        msg = _("filtered revision '%s' (not in '%s' subset)")
        msg %= (changeid, repo.filtername)
        return error.FilteredRepoLookupError(msg)

    # Under the visibility filter the changeset may be hidden because it is
    # obsolete; resolve it in the unfiltered repo to find out.
    ctx = repo.unfiltered()[changeid]
    if ctx.obsolete():
        # enrich the message with the reason the changeset is not visible
        reason = obsutil._getfilteredreason(repo, changeid, ctx)
    else:
        reason = _("hidden revision '%s'") % changeid
    return error.FilteredRepoLookupError(
        reason, hint=_('use --hidden to access hidden revisions'))
413 413
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        # Resolution order below: integer rev, 'null', 'tip', '.'/dirstate
        # p1, 20-byte binary node, decimal rev string, 40-hex node, names
        # (tags/bookmarks/branches via repo.names), then node prefix match.
        try:
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if not pycompat.ispy3 and isinstance(changeid, long):
                changeid = str(changeid)
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if (changeid == '.'
                or repo.local() and changeid == repo.dirstate.p1()):
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            if len(changeid) == 20:
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            try:
                r = int(changeid)
                if '%d' % r != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    # negative revs count from the end of the changelog
                    r += l
                if r < 0 or r >= l and r != wdirrev:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            if len(changeid) == 40:
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if (repo.local()
                and changeid in repo.unfiltered().dirstate.parents()):
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                # make the error message printable for a raw binary node
                if len(changeid) == 20 and nonascii(changeid):
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            raise _filterederror(repo, changeid)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

    def __hash__(self):
        # hash by revision when bound; fall back to identity
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        # only the null revision is falsy
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        # null second parents are omitted from the list
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [changectx(repo, p1)]
        return [changectx(repo, p1), changectx(repo, p2)]

    def changeset(self):
        # legacy tuple form of the changeset data
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra
    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                              short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)

    def descendant(self, other):
        """True if other is descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(match, bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)
690 690
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
    in the repo,
    workingfilectx: a filecontext that represents files from the working
    directory,
    memfilectx: a filecontext that represents files in-memory,
    overlayfilectx: duplicate another filecontext with some fields overridden.
    """
    @propertycache
    def _filelog(self):
        # revision storage for this file's path
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        # The changelog revision this file context is bound to, derived from
        # whichever of _changeid/_changectx/_descendantrev the constructor
        # supplied, falling back to the raw (possibly aliased) linkrev.
        if r'_changeid' in self.__dict__:
            return self._changeid
        elif r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        # resolve the file node from the explicit fileid when given,
        # otherwise from the owning changeset's manifest
        if r'_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path
731 731
    def __nonzero__(self):
        # truthiness means "this file exists in its context"
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        # "path@changeset", or "path@???" when the changeset can't be resolved
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return "%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        # equal iff same concrete type, same path and same file node
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    # Most accessors below delegate to the associated changectx.
    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def obsolete(self):
        return self._changectx.obsolete()
    def instabilities(self):
        return self._changectx.instabilities()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        return self._copied
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return util.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False
838 838
    # subclasses that need to drive the comparison themselves set this
    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        # Only read file data when sizes allow the contents to be equal;
        # otherwise a size mismatch alone proves a difference.
        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True
857 857
    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr
903 903
    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        # without an associated changeset, the raw linkrev is all we have
        noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
        if noctx or self.rev() == lkr:
            return self.linkrev()
        return self._adjustlinkrev(self.rev(), inclusive=True)

    def introfilectx(self):
        """Return filectx having identical contents, but pointing to the
        changeset revision where this filectx was introduced"""
        introrev = self.introrev()
        if self.rev() == introrev:
            return self
        return self.filectx(self.filenode(), changeid=introrev)
927 927
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if r'_changeid' in vars(self) or r'_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif r'_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx
943 943
944 944 def parents(self):
945 945 _path = self._path
946 946 fl = self._filelog
947 947 parents = self._filelog.parents(self._filenode)
948 948 pl = [(_path, node, fl) for node in parents if node != nullid]
949 949
950 950 r = fl.renamed(self._filenode)
951 951 if r:
952 952 # - In the simple rename case, both parent are nullid, pl is empty.
953 953 # - In case of merge, only one of the parent is null id and should
954 954 # be replaced with the rename information. This parent is -always-
955 955 # the first one.
956 956 #
957 957 # As null id have always been filtered out in the previous list
958 958 # comprehension, inserting to 0 will always result in "replacing
959 959 # first nullid parent with rename information.
960 960 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
961 961
962 962 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
963 963
964 964 def p1(self):
965 965 return self.parents()[0]
966 966
967 967 def p2(self):
968 968 p = self.parents()
969 969 if len(p) == 2:
970 970 return p[1]
971 971 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
972 972
973 973 def annotate(self, follow=False, linenumber=False, skiprevs=None,
974 974 diffopts=None):
975 975 '''returns a list of tuples of ((ctx, number), line) for each line
976 976 in the file, where ctx is the filectx of the node where
977 977 that line was last changed; if linenumber parameter is true, number is
978 978 the line number at the first appearance in the managed file, otherwise,
979 979 number has a fixed value of False.
980 980 '''
981 981
982 982 def lines(text):
983 983 if text.endswith("\n"):
984 984 return text.count("\n")
985 985 return text.count("\n") + int(bool(text))
986 986
987 987 if linenumber:
988 988 def decorate(text, rev):
989 989 return ([annotateline(fctx=rev, lineno=i)
990 990 for i in xrange(1, lines(text) + 1)], text)
991 991 else:
992 992 def decorate(text, rev):
993 993 return ([annotateline(fctx=rev)] * lines(text), text)
994 994
995 995 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
996 996
997 997 def parents(f):
998 998 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
999 999 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1000 1000 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1001 1001 # isn't an ancestor of the srcrev.
1002 1002 f._changeid
1003 1003 pl = f.parents()
1004 1004
1005 1005 # Don't return renamed parents if we aren't following.
1006 1006 if not follow:
1007 1007 pl = [p for p in pl if p.path() == f.path()]
1008 1008
1009 1009 # renamed filectx won't have a filelog yet, so set it
1010 1010 # from the cache to save time
1011 1011 for p in pl:
1012 1012 if not r'_filelog' in p.__dict__:
1013 1013 p._filelog = getlog(p.path())
1014 1014
1015 1015 return pl
1016 1016
1017 1017 # use linkrev to find the first changeset where self appeared
1018 1018 base = self.introfilectx()
1019 1019 if getattr(base, '_ancestrycontext', None) is None:
1020 1020 cl = self._repo.changelog
1021 1021 if base.rev() is None:
1022 1022 # wctx is not inclusive, but works because _ancestrycontext
1023 1023 # is used to test filelog revisions
1024 1024 ac = cl.ancestors([p.rev() for p in base.parents()],
1025 1025 inclusive=True)
1026 1026 else:
1027 1027 ac = cl.ancestors([base.rev()], inclusive=True)
1028 1028 base._ancestrycontext = ac
1029 1029
1030 1030 # This algorithm would prefer to be recursive, but Python is a
1031 1031 # bit recursion-hostile. Instead we do an iterative
1032 1032 # depth-first search.
1033 1033
1034 1034 # 1st DFS pre-calculates pcache and needed
1035 1035 visit = [base]
1036 1036 pcache = {}
1037 1037 needed = {base: 1}
1038 1038 while visit:
1039 1039 f = visit.pop()
1040 1040 if f in pcache:
1041 1041 continue
1042 1042 pl = parents(f)
1043 1043 pcache[f] = pl
1044 1044 for p in pl:
1045 1045 needed[p] = needed.get(p, 0) + 1
1046 1046 if p not in pcache:
1047 1047 visit.append(p)
1048 1048
1049 1049 # 2nd DFS does the actual annotate
1050 1050 visit[:] = [base]
1051 1051 hist = {}
1052 1052 while visit:
1053 1053 f = visit[-1]
1054 1054 if f in hist:
1055 1055 visit.pop()
1056 1056 continue
1057 1057
1058 1058 ready = True
1059 1059 pl = pcache[f]
1060 1060 for p in pl:
1061 1061 if p not in hist:
1062 1062 ready = False
1063 1063 visit.append(p)
1064 1064 if ready:
1065 1065 visit.pop()
1066 1066 curr = decorate(f.data(), f)
1067 1067 skipchild = False
1068 1068 if skiprevs is not None:
1069 1069 skipchild = f._changeid in skiprevs
1070 1070 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1071 1071 diffopts)
1072 1072 for p in pl:
1073 1073 if needed[p] == 1:
1074 1074 del hist[p]
1075 1075 del needed[p]
1076 1076 else:
1077 1077 needed[p] -= 1
1078 1078
1079 1079 hist[f] = curr
1080 1080 del pcache[f]
1081 1081
1082 return pycompat.ziplist(hist[base][0], hist[base][1].splitlines(True))
1082 lineattrs, text = hist[base]
1083 return pycompat.ziplist(lineattrs, mdiff.splitnewlines(text))
1083 1084
1084 1085 def ancestors(self, followfirst=False):
1085 1086 visit = {}
1086 1087 c = self
1087 1088 if followfirst:
1088 1089 cut = 1
1089 1090 else:
1090 1091 cut = None
1091 1092
1092 1093 while True:
1093 1094 for parent in c.parents()[:cut]:
1094 1095 visit[(parent.linkrev(), parent.filenode())] = parent
1095 1096 if not visit:
1096 1097 break
1097 1098 c = visit.pop(max(visit))
1098 1099 yield c
1099 1100
1100 1101 def decodeddata(self):
1101 1102 """Returns `data()` after running repository decoding filters.
1102 1103
1103 1104 This is often equivalent to how the data would be expressed on disk.
1104 1105 """
1105 1106 return self._repo.wwritedata(self.path(), self.data())
1106 1107
@attr.s(slots=True, frozen=True)
class annotateline(object):
    """Annotation data for a single line: immutable attrs record."""
    # filectx of the revision that introduced the line.
    fctx = attr.ib()
    # Line number at first appearance in the file, or False when line
    # numbers were not requested (see basefilectx.annotate's decorate()).
    lineno = attr.ib(default=False)
    # Whether this annotation was the result of a skip-annotate.
    skip = attr.ib(default=False)
1113 1114
def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r'''
    Given parent and child fctxes and annotate data for parents, for all lines
    in either parent that match the child, annotate the child with the parent's
    data.

    Additionally, if `skipchild` is True, replace all other lines with parent
    annotate data as well such that child is never blamed for any lines.

    See test-annotate.py for unit tests.

    Each annotate datum ('parent' entries and 'child') is a pair
    ([annotateline, ...], text) as produced by annotate's decorate().
    '''
    # Diff each parent's text against the child's text once.
    pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
               for parent in parents]

    if skipchild:
        # Need to iterate over the blocks twice -- make it a list
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    # Mercurial currently prefers p2 over p1 for annotate.
    # TODO: change this?
    for parent, blocks in pblocks:
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.
            if t == '=':
                child[0][b1:b2] = parent[0][a1:a2]

    if skipchild:
        # Now try and match up anything that couldn't be matched,
        # Reversing pblocks maintains bias towards p2, matching above
        # behavior.
        pblocks.reverse()

        # The heuristics are:
        # * Work on blocks of changed lines (effectively diff hunks with -U0).
        #   This could potentially be smarter but works well enough.
        # * For a non-matching section, do a best-effort fit. Match lines in
        #    diff hunks 1:1, dropping lines as necessary.
        # * Repeat the last line as a last resort.

        # First, replace as much as possible without repeating the last line.
        remaining = [(parent, []) for parent, _blocks in pblocks]
        for idx, (parent, blocks) in enumerate(pblocks):
            for (a1, a2, b1, b2), _t in blocks:
                if a2 - a1 >= b2 - b1:
                    for bk in xrange(b1, b2):
                        # Only replace lines still blamed on the child.
                        if child[0][bk].fctx == childfctx:
                            ak = min(a1 + (bk - b1), a2 - 1)
                            child[0][bk] = attr.evolve(parent[0][ak], skip=True)
                else:
                    remaining[idx][1].append((a1, a2, b1, b2))

        # Then, look at anything left, which might involve repeating the last
        # line.
        for parent, blocks in remaining:
            for a1, a2, b1, b2 in blocks:
                for bk in xrange(b1, b2):
                    if child[0][bk].fctx == childfctx:
                        # min() repeats the parent's last line when the
                        # child hunk is longer than the parent hunk.
                        ak = min(a1 + (bk - b1), a2 - 1)
                        child[0][bk] = attr.evolve(parent[0][ak], skip=True)
    return child
1174 1175
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        # The assignments below pre-populate attributes that are otherwise
        # computed lazily via propertycache; only the provided ones are set.
        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        # Raw revlog data, without applying flag processors.
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        # Filelog content for this file revision; censored nodes either
        # read as empty (censor.policy=ignore) or abort.
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                # A parent holding the same file revision means this is not
                # the changeset that introduced the rename.
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1280 1281
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        # Only pre-set the values that were provided; the rest fall back to
        # the propertycache defaults below.
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        # 'devel.default-date' allows tests to pin a deterministic date.
        ui = self._repo.ui
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        # An uncommitted context carries the bookmarks of all its parents.
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match,
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        # Yield direct parents first, then the rest of the ancestry.
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1481 1482
1482 1483 class workingctx(committablectx):
1483 1484 """A workingctx object makes access to data related to
1484 1485 the current working directory convenient.
1485 1486 date - any valid date string or (unixtime, offset), or None.
1486 1487 user - username string, or None.
1487 1488 extra - a dictionary of extra values, or None.
1488 1489 changes - a list of file lists as returned by localrepo.status()
1489 1490 or None to use the repository status.
1490 1491 """
1491 1492 def __init__(self, repo, text="", user=None, date=None, extra=None,
1492 1493 changes=None):
1493 1494 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1494 1495
1495 1496 def __iter__(self):
1496 1497 d = self._repo.dirstate
1497 1498 for f in d:
1498 1499 if d[f] != 'r':
1499 1500 yield f
1500 1501
1501 1502 def __contains__(self, key):
1502 1503 return self._repo.dirstate[key] not in "?r"
1503 1504
1504 1505 def hex(self):
1505 1506 return hex(wdirid)
1506 1507
1507 1508 @propertycache
1508 1509 def _parents(self):
1509 1510 p = self._repo.dirstate.parents()
1510 1511 if p[1] == nullid:
1511 1512 p = p[:-1]
1512 1513 return [changectx(self._repo, x) for x in p]
1513 1514
1514 1515 def filectx(self, path, filelog=None):
1515 1516 """get a file context from the working directory"""
1516 1517 return workingfilectx(self._repo, path, workingctx=self,
1517 1518 filelog=filelog)
1518 1519
1519 1520 def dirty(self, missing=False, merge=True, branch=True):
1520 1521 "check whether a working directory is modified"
1521 1522 # check subrepos first
1522 1523 for s in sorted(self.substate):
1523 1524 if self.sub(s).dirty(missing=missing):
1524 1525 return True
1525 1526 # check current working dir
1526 1527 return ((merge and self.p2()) or
1527 1528 (branch and self.branch() != self.p1().branch()) or
1528 1529 self.modified() or self.added() or self.removed() or
1529 1530 (missing and self.deleted()))
1530 1531
1531 1532 def add(self, list, prefix=""):
1532 1533 with self._repo.wlock():
1533 1534 ui, ds = self._repo.ui, self._repo.dirstate
1534 1535 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1535 1536 rejected = []
1536 1537 lstat = self._repo.wvfs.lstat
1537 1538 for f in list:
1538 1539 # ds.pathto() returns an absolute file when this is invoked from
1539 1540 # the keyword extension. That gets flagged as non-portable on
1540 1541 # Windows, since it contains the drive letter and colon.
1541 1542 scmutil.checkportable(ui, os.path.join(prefix, f))
1542 1543 try:
1543 1544 st = lstat(f)
1544 1545 except OSError:
1545 1546 ui.warn(_("%s does not exist!\n") % uipath(f))
1546 1547 rejected.append(f)
1547 1548 continue
1548 1549 if st.st_size > 10000000:
1549 1550 ui.warn(_("%s: up to %d MB of RAM may be required "
1550 1551 "to manage this file\n"
1551 1552 "(use 'hg revert %s' to cancel the "
1552 1553 "pending addition)\n")
1553 1554 % (f, 3 * st.st_size // 1000000, uipath(f)))
1554 1555 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1555 1556 ui.warn(_("%s not added: only files and symlinks "
1556 1557 "supported currently\n") % uipath(f))
1557 1558 rejected.append(f)
1558 1559 elif ds[f] in 'amn':
1559 1560 ui.warn(_("%s already tracked!\n") % uipath(f))
1560 1561 elif ds[f] == 'r':
1561 1562 ds.normallookup(f)
1562 1563 else:
1563 1564 ds.add(f)
1564 1565 return rejected
1565 1566
1566 1567 def forget(self, files, prefix=""):
1567 1568 with self._repo.wlock():
1568 1569 ds = self._repo.dirstate
1569 1570 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1570 1571 rejected = []
1571 1572 for f in files:
1572 1573 if f not in self._repo.dirstate:
1573 1574 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1574 1575 rejected.append(f)
1575 1576 elif self._repo.dirstate[f] != 'a':
1576 1577 self._repo.dirstate.remove(f)
1577 1578 else:
1578 1579 self._repo.dirstate.drop(f)
1579 1580 return rejected
1580 1581
1581 1582 def undelete(self, list):
1582 1583 pctxs = self.parents()
1583 1584 with self._repo.wlock():
1584 1585 ds = self._repo.dirstate
1585 1586 for f in list:
1586 1587 if self._repo.dirstate[f] != 'r':
1587 1588 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1588 1589 else:
1589 1590 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1590 1591 t = fctx.data()
1591 1592 self._repo.wwrite(f, t, fctx.flags())
1592 1593 self._repo.dirstate.normal(f)
1593 1594
1594 1595 def copy(self, source, dest):
1595 1596 try:
1596 1597 st = self._repo.wvfs.lstat(dest)
1597 1598 except OSError as err:
1598 1599 if err.errno != errno.ENOENT:
1599 1600 raise
1600 1601 self._repo.ui.warn(_("%s does not exist!\n")
1601 1602 % self._repo.dirstate.pathto(dest))
1602 1603 return
1603 1604 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1604 1605 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1605 1606 "symbolic link\n")
1606 1607 % self._repo.dirstate.pathto(dest))
1607 1608 else:
1608 1609 with self._repo.wlock():
1609 1610 if self._repo.dirstate[dest] in '?':
1610 1611 self._repo.dirstate.add(dest)
1611 1612 elif self._repo.dirstate[dest] in 'r':
1612 1613 self._repo.dirstate.normallookup(dest)
1613 1614 self._repo.dirstate.copy(source, dest)
1614 1615
1615 1616 def match(self, pats=None, include=None, exclude=None, default='glob',
1616 1617 listsubrepos=False, badfn=None):
1617 1618 r = self._repo
1618 1619
1619 1620 # Only a case insensitive filesystem needs magic to translate user input
1620 1621 # to actual case in the filesystem.
1621 1622 icasefs = not util.fscasesensitive(r.root)
1622 1623 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1623 1624 default, auditor=r.auditor, ctx=self,
1624 1625 listsubrepos=listsubrepos, badfn=badfn,
1625 1626 icasefs=icasefs)
1626 1627
1627 1628 def _filtersuspectsymlink(self, files):
1628 1629 if not files or self._repo.dirstate._checklink:
1629 1630 return files
1630 1631
1631 1632 # Symlink placeholders may get non-symlink-like contents
1632 1633 # via user error or dereferencing by NFS or Samba servers,
1633 1634 # so we filter out any placeholders that don't look like a
1634 1635 # symlink
1635 1636 sane = []
1636 1637 for f in files:
1637 1638 if self.flags(f) == 'l':
1638 1639 d = self[f].data()
1639 1640 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1640 1641 self._repo.ui.debug('ignoring suspect symlink placeholder'
1641 1642 ' "%s"\n' % f)
1642 1643 continue
1643 1644 sane.append(f)
1644 1645 return sane
1645 1646
1646 1647 def _checklookup(self, files):
1647 1648 # check for any possibly clean files
1648 1649 if not files:
1649 1650 return [], [], []
1650 1651
1651 1652 modified = []
1652 1653 deleted = []
1653 1654 fixup = []
1654 1655 pctx = self._parents[0]
1655 1656 # do a full compare of any files that might have changed
1656 1657 for f in sorted(files):
1657 1658 try:
1658 1659 # This will return True for a file that got replaced by a
1659 1660 # directory in the interim, but fixing that is pretty hard.
1660 1661 if (f not in pctx or self.flags(f) != pctx.flags(f)
1661 1662 or pctx[f].cmp(self[f])):
1662 1663 modified.append(f)
1663 1664 else:
1664 1665 fixup.append(f)
1665 1666 except (IOError, OSError):
1666 1667 # A file become inaccessible in between? Mark it as deleted,
1667 1668 # matching dirstate behavior (issue5584).
1668 1669 # The dirstate has more complex behavior around whether a
1669 1670 # missing file matches a directory, etc, but we don't need to
1670 1671 # bother with that: if f has made it to this point, we're sure
1671 1672 # it's in the dirstate.
1672 1673 deleted.append(f)
1673 1674
1674 1675 return modified, deleted, fixup
1675 1676
1676 1677 def _poststatusfixup(self, status, fixup):
1677 1678 """update dirstate for files that are actually clean"""
1678 1679 poststatus = self._repo.postdsstatus()
1679 1680 if fixup or poststatus:
1680 1681 try:
1681 1682 oldid = self._repo.dirstate.identity()
1682 1683
1683 1684 # updating the dirstate is optional
1684 1685 # so we don't wait on the lock
1685 1686 # wlock can invalidate the dirstate, so cache normal _after_
1686 1687 # taking the lock
1687 1688 with self._repo.wlock(False):
1688 1689 if self._repo.dirstate.identity() == oldid:
1689 1690 if fixup:
1690 1691 normal = self._repo.dirstate.normal
1691 1692 for f in fixup:
1692 1693 normal(f)
1693 1694 # write changes out explicitly, because nesting
1694 1695 # wlock at runtime may prevent 'wlock.release()'
1695 1696 # after this block from doing so for subsequent
1696 1697 # changing files
1697 1698 tr = self._repo.currenttransaction()
1698 1699 self._repo.dirstate.write(tr)
1699 1700
1700 1701 if poststatus:
1701 1702 for ps in poststatus:
1702 1703 ps(self, status)
1703 1704 else:
1704 1705 # in this case, writing changes out breaks
1705 1706 # consistency, because .hg/dirstate was
1706 1707 # already changed simultaneously after last
1707 1708 # caching (see also issue5584 for detail)
1708 1709 self._repo.ui.debug('skip updating dirstate: '
1709 1710 'identity mismatch\n')
1710 1711 except error.LockError:
1711 1712 pass
1712 1713 finally:
1713 1714 # Even if the wlock couldn't be grabbed, clear out the list.
1714 1715 self._repo.clearpostdsstatus()
1715 1716
1716 1717 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1717 1718 '''Gets the status from the dirstate -- internal use only.'''
1718 1719 subrepos = []
1719 1720 if '.hgsub' in self:
1720 1721 subrepos = sorted(self.substate)
1721 1722 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1722 1723 clean=clean, unknown=unknown)
1723 1724
1724 1725 # check for any possibly clean files
1725 1726 fixup = []
1726 1727 if cmp:
1727 1728 modified2, deleted2, fixup = self._checklookup(cmp)
1728 1729 s.modified.extend(modified2)
1729 1730 s.deleted.extend(deleted2)
1730 1731
1731 1732 if fixup and clean:
1732 1733 s.clean.extend(fixup)
1733 1734
1734 1735 self._poststatusfixup(s, fixup)
1735 1736
1736 1737 if match.always():
1737 1738 # cache for performance
1738 1739 if s.unknown or s.ignored or s.clean:
1739 1740 # "_status" is cached with list*=False in the normal route
1740 1741 self._status = scmutil.status(s.modified, s.added, s.removed,
1741 1742 s.deleted, [], [], [])
1742 1743 else:
1743 1744 self._status = s
1744 1745
1745 1746 return s
1746 1747
1747 1748 @propertycache
1748 1749 def _manifest(self):
1749 1750 """generate a manifest corresponding to the values in self._status
1750 1751
1751 1752 This reuse the file nodeid from parent, but we use special node
1752 1753 identifiers for added and modified files. This is used by manifests
1753 1754 merge to see that files are different and by update logic to avoid
1754 1755 deleting newly added files.
1755 1756 """
1756 1757 return self._buildstatusmanifest(self._status)
1757 1758
1758 1759 def _buildstatusmanifest(self, status):
1759 1760 """Builds a manifest that includes the given status results."""
1760 1761 parents = self.parents()
1761 1762
1762 1763 man = parents[0].manifest().copy()
1763 1764
1764 1765 ff = self._flagfunc
1765 1766 for i, l in ((addednodeid, status.added),
1766 1767 (modifiednodeid, status.modified)):
1767 1768 for f in l:
1768 1769 man[f] = i
1769 1770 try:
1770 1771 man.setflag(f, ff(f))
1771 1772 except OSError:
1772 1773 pass
1773 1774
1774 1775 for f in status.deleted + status.removed:
1775 1776 if f in man:
1776 1777 del man[f]
1777 1778
1778 1779 return man
1779 1780
    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        # NOTE: the incoming 's' is deliberately discarded; status relative to
        # the working directory is always recomputed from the dirstate here.
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            # Comparing against something other than the dirstate parent:
            # fall back to the manifest-based computation in the base class.
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s
1799 1800
    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            # NOTE: mutates the caller's match object in place.
            match.bad = bad
        return match
1819 1820
    def markcommitted(self, node):
        """Run post-commit bookkeeping once this context was committed."""
        super(workingctx, self).markcommitted(node)

        # Let the sparse machinery react to the freshly created commit.
        sparse.aftercommit(self._repo, node)
1824 1825
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        # filelog/ctx are optional; when omitted, the corresponding
        # attributes are presumably computed lazily by subclass
        # propertycaches -- TODO confirm against basefilectx.
        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            # nullid signals "not present in this manifest"
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # renamed is a (source path, filenode) pair; extend to
            # (path, node, filelog) with no specific filelog.
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # Entries whose node is nullid (absent from the parent) are dropped.
        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []
1871 1872
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # Computed lazily when no workingctx was supplied to __init__.
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)
    def renamed(self):
        # Returns (source path, source filenode in p1) or None when the
        # dirstate has no copy record for this file.
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # File is gone from disk: fall back to the changeset's date.
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose,
                          **kwargs)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        # Only record the copy when the dirstate tracks the file
        # (states 'n', 'm' or 'a').
        if self._repo.dirstate[self._path] in "nma":
            self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if wvfs.isdir(f) and not wvfs.islink(f):
            wvfs.rmtree(f, forcibly=True)
        # An ancestor directory may exist as a regular file or symlink;
        # remove the first one found so the directory chain can be created.
        for p in reversed(list(util.finddirs(f))):
            if wvfs.isfileorlink(p):
                wvfs.unlink(p)
                break

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)
1950 1951
class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
      'exists': bool?
      'date': date?
      'data': str?
      'flags': str?
      'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        self._repo = repo
        self.clean()

    def setbase(self, wrappedctx):
        """Set (or replace) the context this overlay is stacked upon."""
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, '_manifest')

    def data(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                if self._cache[path]['data']:
                    return self._cache[path]['data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        def f(path):
            return self._cache[path]['flags']
        return f

    def files(self):
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        # Dirty, still exists, and present in the parent: a modification.
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                self._existsinparent(f)]

    def added(self):
        # Dirty, exists, but absent from the parent: an addition.
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                not self._existsinparent(f)]

    def removed(self):
        # Dirty, deleted, and present in the parent: a removal.
        return [f for f in self._cache.keys() if
                not self._cache[f]['exists'] and self._existsinparent(f)]

    def isinmemory(self):
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path]['date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        if self.isdirty(path):
            self._cache[path]['copied'] = origin
        else:
            raise error.ProgrammingError('markcopied() called on clean context')

    def copydata(self, path):
        if self.isdirty(path):
            return self._cache[path]['copied']
        else:
            raise error.ProgrammingError('copydata() called on clean context')

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return self._cache[path]['flags']
            else:
                # Fixed: this class has no ``self._path`` attribute; the old
                # code raised AttributeError instead of the intended error.
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].flags()

    def _existsinparent(self, path):
        try:
            # ``commitctx` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """
        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if 'l' in self.p1()[component].flags():
                raise error.Abort("error: %s conflicts with symlink %s "
                                  "in %s." % (path, component,
                                              self.p1().rev()))
            else:
                raise error.Abort("error: '%s' conflicts with file '%s' in "
                                  "%s." % (path, component,
                                           self.p1().rev()))

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split('/')
        for i in xrange(len(components)):
            component = "/".join(components[0:i])
            if component in self.p1():
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't any paths matching `path/*`).
        match = matchmod.match('/', '', [path + '/'], default=b'relpath')
        matches = self.p1().manifest().matches(match)
        if len(matches) > 0:
            if len(matches) == 1 and matches.keys()[0] == path:
                return
            raise error.Abort("error: file '%s' cannot be written because "
                              " '%s/' is a folder in %s (containing %d "
                              "entries: %s)"
                              % (path, path, self.p1(), len(matches),
                                 ', '.join(matches.keys())))

    def write(self, path, data, flags='', **kwargs):
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        self._auditconflicts(path)
        self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
                        flags=flags)

    def setflags(self, path, l, x):
        self._markdirty(path, exists=True, date=dateutil.makedate(),
                        flags=(l and 'l' or '') + (x and 'x' or ''))

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (self._cache[path]['exists'] and
                        'l' in self._cache[path]['flags']):
                return self.exists(self._cache[path]['data'].strip())
            else:
                return self._cache[path]['exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path]['exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return len(self._cache[path]['data'])
            else:
                # Fixed: this class has no ``self._path`` attribute; the old
                # code raised AttributeError instead of the intended error.
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        return self._wrappedctx[path].size()

    def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
                 user=None, editor=None):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped contexts' if not passed.
        if parents is None:
            parents = self._wrappedctx.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self._cache.keys()
        def getfile(repo, memctx, path):
            if self._cache[path]['exists']:
                return memfilectx(repo, memctx, path,
                                  self._cache[path]['data'],
                                  'l' in self._cache[path]['flags'],
                                  'x' in self._cache[path]['flags'],
                                  self._cache[path]['copied'])
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None
        return memctx(self._repo, parents, text, files, getfile, date=date,
                      extra=extra, user=user, branch=branch, editor=editor)

    def isdirty(self, path):
        return path in self._cache

    def isempty(self):
        # We need to discard any keys that are actually clean before the empty
        # commit check.
        self._compact()
        return len(self._cache) == 0

    def clean(self):
        # Forget all cached writes.
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (underlying.data() == cache['data'] and
                            underlying.flags() == cache['flags']):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(self, path, exists, data=None, date=None, flags=''):
        self._cache[path] = {
            'exists': exists,
            'data': data,
            'date': date,
            'flags': flags,
            'copied': None,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(self._repo, path, parent=self,
                                     filelog=filelog)
2238 2239
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        # parent is the owning overlayworkingctx; almost every operation
        # below delegates to it.
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # True when contents differ from fctx.
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def renamed(self):
        # (source path, source filenode in p1) or None when not a copy.
        path = self._parent.copydata(self._path)
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # Nothing to audit: the overlay never touches the filesystem.
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        # No on-disk state to clear in the in-memory case.
        pass
2297 2298
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # Deliberately skips workingctx.__init__ in the MRO so the
        # precomputed 'changes' status can be handed to the base class.
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            # Everything not part of this commit is reported as clean.
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2333 2334
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    memo = {}

    def getfilectx(repo, memctx, path):
        try:
            return memo[path]
        except KeyError:
            result = memo[path] = func(repo, memctx, path)
            return result

    return getfilectx
2349 2350
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # renamed() yields a (source path, filenode) pair or None; memctx
        # only keeps track of the source path.
        rename = fctx.renamed()
        copysource = rename[0] if rename else None
        return memfilectx(repo, memctx, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copysource)

    return getfilectx
2368 2369
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        if data is None:
            # The patch deletes this file; memctx treats None as a removal.
            return None
        islink, isexec = mode
        return memfilectx(repo, memctx, path, data,
                          islink=islink, isexec=isexec, copied=copied)

    return getfilectx
2383 2384
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # Missing parents (None) become the null revision.
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                # self[f] (via filectxfn) is None for removed files, so a
                # truthy result means the file content is present: modified.
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2506 2507
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, changectx, path, data, islink=False,
                 isexec=False, copied=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # Encode link/exec booleans as the usual manifest flag string.
        flags = ''
        if islink:
            flags += 'l'
        if isexec:
            flags += 'x'
        self._flags = flags
        if copied:
            self._copied = (copied, nullid)
        else:
            self._copied = None

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2539 2540
class overlayfilectx(committablefilectx):
    """Like memfilectx but take an original filectx and optional parameters to
    override parts of it. This is useful when fctx.data() is expensive (i.e.
    flag processor is expensive) and raw data, flags, and filenode could be
    reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is a
        function to be lazy. path, flags, copied, ctx: None or overridden value

        copied could be (path, rev), or False. copied could also be just path,
        and will be converted to (path, nullid). This simplifies some callers.
        """

        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            ctxmatch = lambda: True
        else:
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # When data, copied (could affect data), ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path, flags are not hashed in filelog (but in manifestlog) so they do
        # not affect reusable here.
        #
        # If ctx or copied is overridden to a same value with originalfctx,
        # still consider it's reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
            for attr_ in attrs:
                if util.safehasattr(originalfctx, attr_):
                    setattr(self, attr_, getattr(originalfctx, attr_))

    def data(self):
        """Return file content, computed lazily via the stored datafunc."""
        return self._datafunc()
2610 2611
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reusing
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit message.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __new__(cls, repo, originalctx, *args, **kwargs):
        return super(metadataonlyctx, cls).__new__(cls, repo)

    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        # pad with null contexts so self._parents always has length 2
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        """Return the node of the manifest reused from the original ctx."""
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # file contents are unchanged, so delegate to the original revision
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2708 2709
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """Return True if this file's content differs from ``fctx``'s.

        Note the inversion relative to filecmp.cmp, which returns True
        when the files are the *same*.
        """
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        symlinks = ('l' in self.flags() or 'l' in fctx.flags())
        if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        # arbitrary on-disk files carry no exec/symlink flags
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        # no repo filters to apply, so decoded data is the raw bytes
        with open(self._path, "rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        # Write in binary mode: data()/decodeddata() read the file as raw
        # bytes, and text mode would translate line endings on Windows
        # (and reject bytes payloads on Python 3).
        with open(self._path, "wb") as f:
            f.write(data)
@@ -1,1788 +1,1808
1 1 # subrepo.py - sub-repository classes and factory
2 2 #
3 3 # Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import copy
11 11 import errno
12 12 import hashlib
13 13 import os
14 14 import posixpath
15 15 import re
16 16 import stat
17 17 import subprocess
18 18 import sys
19 19 import tarfile
20 20 import xml.dom.minidom
21 21
22 22 from .i18n import _
23 23 from . import (
24 24 cmdutil,
25 25 encoding,
26 26 error,
27 27 exchange,
28 28 logcmdutil,
29 29 match as matchmod,
30 30 node,
31 31 pathutil,
32 32 phases,
33 33 pycompat,
34 34 scmutil,
35 35 subrepoutil,
36 36 util,
37 37 vfs as vfsmod,
38 38 )
39 39 from .utils import dateutil
40 40
41 41 hg = None
42 42 reporelpath = subrepoutil.reporelpath
43 43 subrelpath = subrepoutil.subrelpath
44 44 _abssource = subrepoutil._abssource
45 45 propertycache = util.propertycache
46 46
def _expandedabspath(path):
    '''
    get a path or url and if it is a path expand it and return an absolute path
    '''
    expanded = util.urllocalpath(util.expandpath(path))
    u = util.url(expanded)
    if u.scheme:
        # real URL: hand back the caller's value untouched
        return path
    return util.normpath(os.path.abspath(u.path))
56 56
def _getstorehashcachename(remotepath):
    '''get a unique filename for the store hash cache of a remote repository'''
    # hash the normalized remote path and keep a short, filesystem-safe prefix
    digest = hashlib.sha1(_expandedabspath(remotepath)).digest()
    return node.hex(digest)[:12]
60 60
class SubrepoAbort(error.Abort):
    """Exception class used to avoid handling a subrepo error more than once"""
    def __init__(self, *args, **kw):
        # pop our extra keywords before delegating to error.Abort, which
        # would not accept them
        self.subrepo = kw.pop(r'subrepo', None)   # path of the failing subrepo
        self.cause = kw.pop(r'cause', None)       # original sys.exc_info()
        error.Abort.__init__(self, *args, **kw)
67 67
def annotatesubrepoerror(func):
    """Decorator: annotate error.Abort raised by ``func`` with the subrepo
    path, wrapping it in SubrepoAbort so outer frames don't re-annotate it."""
    def decoratedmethod(self, *args, **kargs):
        try:
            res = func(self, *args, **kargs)
        except SubrepoAbort as ex:
            # This exception has already been handled
            raise ex
        except error.Abort as ex:
            subrepo = subrelpath(self)
            errormsg = (util.forcebytestr(ex) + ' '
                        + _('(in subrepository "%s")') % subrepo)
            # avoid handling this exception by raising a SubrepoAbort exception
            raise SubrepoAbort(errormsg, hint=ex.hint, subrepo=subrepo,
                               cause=sys.exc_info())
        return res
    return decoratedmethod
84 84
def _updateprompt(ui, sub, dirty, local, remote):
    """Prompt whether to keep the (l)ocal or take the (r)emote subrepo
    source; returns ui.promptchoice's index (0 == local)."""
    if dirty:
        fmt = (_(' subrepository sources for %s differ\n'
                 'use (l)ocal source (%s) or (r)emote source (%s)?'
                 '$$ &Local $$ &Remote'))
    else:
        fmt = (_(' subrepository sources for %s differ (in checked out '
                 'version)\n'
                 'use (l)ocal source (%s) or (r)emote source (%s)?'
                 '$$ &Local $$ &Remote'))
    return ui.promptchoice(fmt % (subrelpath(sub), local, remote), 0)
98 98
def _sanitize(ui, vfs, ignore):
    """Walk ``vfs``, pruning the first directory entry matching ``ignore``
    (case-insensitively) at each level and deleting any hgrc found under a
    ``.hg`` directory, since it could carry hostile settings."""
    for dirname, dirs, names in vfs.walk():
        # prune at most one matching dir so the walk does not descend into it
        for idx, entry in enumerate(dirs):
            if entry.lower() == ignore:
                del dirs[idx]
                break
        if vfs.basename(dirname).lower() != '.hg':
            continue
        for name in names:
            if name.lower() == 'hgrc':
                ui.warn(_("warning: removing potentially hostile 'hgrc' "
                          "in '%s'\n") % vfs.join(dirname))
                vfs.unlink(vfs.reljoin(dirname, name))
112 112
def _auditsubrepopath(repo, path):
    """Abort if ``path`` escapes the repo or traverses a symlink."""
    # auditor doesn't check if the path itself is a symlink
    pathutil.pathauditor(repo.root)(path)
    if repo.wvfs.islink(path):
        raise error.Abort(_("subrepo '%s' traverses symbolic link") % path)
118 118
# Default per-kind policy for "subrepos.<kind>:allowed": only Mercurial
# subrepos are trusted out of the box; git/svn must be enabled explicitly.
SUBREPO_ALLOWED_DEFAULTS = {
    'hg': True,
    'git': False,
    'svn': False,
}
124 124
def _checktype(ui, kind):
    """Abort unless subrepos of type ``kind`` are known and enabled."""
    # subrepos.allowed is a master kill switch. If disabled, subrepos are
    # disabled period.
    if not ui.configbool('subrepos', 'allowed', True):
        raise error.Abort(_('subrepos not enabled'),
                          hint=_("see 'hg help config.subrepos' for details"))

    # per-kind opt-in/opt-out, e.g. "subrepos.git:allowed"
    default = SUBREPO_ALLOWED_DEFAULTS.get(kind, False)
    if not ui.configbool('subrepos', '%s:allowed' % kind, default):
        raise error.Abort(_('%s subrepos not allowed') % kind,
                          hint=_("see 'hg help config.subrepos' for details"))

    if kind not in types:
        raise error.Abort(_('unknown subrepo type %s') % kind)
139 139
def subrepo(ctx, path, allowwdir=False, allowcreate=True):
    """return instance of the right subrepo class for subrepo in path"""
    # subrepo inherently violates our import layering rules
    # because it wants to make repo objects from deep inside the stack
    # so we manually delay the circular imports to not break
    # scripts that don't use our demand-loading
    global hg
    from . import hg as h
    hg = h

    repo = ctx.repo()
    _auditsubrepopath(repo, path)
    state = ctx.substate[path]   # (source, revision, kind)
    _checktype(repo.ui, state[2])
    if allowwdir:
        # substitute the working-directory revision for the recorded one
        state = (state[0], ctx.subrev(path), state[2])
    return types[state[2]](ctx, path, state[:2], allowcreate)
157 157
def nullsubrepo(ctx, path, pctx):
    """return an empty subrepo in pctx for the extant subrepo in ctx"""
    # subrepo inherently violates our import layering rules
    # because it wants to make repo objects from deep inside the stack
    # so we manually delay the circular imports to not break
    # scripts that don't use our demand-loading
    global hg
    from . import hg as h
    hg = h

    repo = ctx.repo()
    _auditsubrepopath(repo, path)
    state = ctx.substate[path]
    _checktype(repo.ui, state[2])
    subrev = ''
    if state[2] == 'hg':
        # hg subrepos use the null node as their "empty" revision
        subrev = "0" * 40
    return types[state[2]](pctx, path, (state[0], subrev), True)
176 176
177 177 # subrepo classes need to implement the following abstract class:
178 178
class abstractsubrepo(object):
    """Base class defining the interface every subrepo kind implements.

    Methods raising NotImplementedError are mandatory for concrete
    subclasses; the others provide safe no-op/failure defaults.
    """

    def __init__(self, ctx, path):
        """Initialize abstractsubrepo part

        ``ctx`` is the context referring this subrepository in the
        parent repository.

        ``path`` is the path to this subrepository as seen from
        innermost repository.
        """
        self.ui = ctx.repo().ui
        self._ctx = ctx
        self._path = path

    def addwebdirpath(self, serverpath, webconf):
        """Add the hgwebdir entries for this subrepo, and any of its subrepos.

        ``serverpath`` is the path component of the URL for this repo.

        ``webconf`` is the dictionary of hgwebdir entries.
        """
        pass

    def storeclean(self, path):
        """
        returns true if the repository has not changed since it was last
        cloned from or pushed to a given repository.
        """
        return False

    def dirty(self, ignoreupdate=False, missing=False):
        """returns true if the dirstate of the subrepo is dirty or does not
        match current stored state. If ignoreupdate is true, only check
        whether the subrepo has uncommitted changes in its dirstate. If missing
        is true, check for deleted files.
        """
        raise NotImplementedError

    def dirtyreason(self, ignoreupdate=False, missing=False):
        """return reason string if it is ``dirty()``

        Returned string should have enough information for the message
        of exception.

        This returns None, otherwise.
        """
        if self.dirty(ignoreupdate=ignoreupdate, missing=missing):
            return _('uncommitted changes in subrepository "%s"'
                     ) % subrelpath(self)

    def bailifchanged(self, ignoreupdate=False, hint=None):
        """raise Abort if subrepository is ``dirty()``
        """
        dirtyreason = self.dirtyreason(ignoreupdate=ignoreupdate,
                                       missing=True)
        if dirtyreason:
            raise error.Abort(dirtyreason, hint=hint)

    def basestate(self):
        """current working directory base state, disregarding .hgsubstate
        state and working directory modifications"""
        raise NotImplementedError

    def checknested(self, path):
        """check if path is a subrepository within this repository"""
        return False

    def commit(self, text, user, date):
        """commit the current changes to the subrepo with the given
        log message. Use given user and date if possible. Return the
        new state of the subrepo.
        """
        raise NotImplementedError

    def phase(self, state):
        """returns phase of specified state in the subrepository.
        """
        return phases.public

    def remove(self):
        """remove the subrepo

        (should verify the dirstate is not dirty first)
        """
        raise NotImplementedError

    def get(self, state, overwrite=False):
        """run whatever commands are needed to put the subrepo into
        this state
        """
        raise NotImplementedError

    def merge(self, state):
        """merge currently-saved state with the new state."""
        raise NotImplementedError

    def push(self, opts):
        """perform whatever action is analogous to 'hg push'

        This may be a no-op on some systems.
        """
        raise NotImplementedError

    def add(self, ui, match, prefix, explicitonly, **opts):
        """add matched files; returns the list of rejected paths"""
        return []

    def addremove(self, matcher, prefix, opts, dry_run, similarity):
        # not supported by default; 1 signals a warning was issued
        self.ui.warn("%s: %s" % (prefix, _("addremove is not supported")))
        return 1

    def cat(self, match, fm, fntemplate, prefix, **opts):
        """cat matched files; returns 0 on success, 1 otherwise"""
        return 1

    def status(self, rev2, **opts):
        """return an (empty by default) scmutil.status tuple"""
        return scmutil.status([], [], [], [], [], [], [])

    def diff(self, ui, diffopts, node2, match, prefix, **opts):
        pass

    def outgoing(self, ui, dest, opts):
        """show outgoing changes; returns 0 on success, 1 otherwise"""
        return 1

    def incoming(self, ui, source, opts):
        """show incoming changes; returns 0 on success, 1 otherwise"""
        return 1

    def files(self):
        """return filename iterator"""
        raise NotImplementedError

    def filedata(self, name, decode):
        """return file data, optionally passed through repo decoders"""
        raise NotImplementedError

    def fileflags(self, name):
        """return file flags"""
        return ''

    def getfileset(self, expr):
        """Resolve the fileset expression for this repo"""
        return set()

    def printfiles(self, ui, m, fm, fmt, subrepos):
        """handle the files command for this subrepo"""
        return 1

    def archive(self, archiver, prefix, match=None, decode=True):
        """archive matched files into ``archiver``; returns file count"""
        if match is not None:
            files = [f for f in self.files() if match(f)]
        else:
            files = self.files()
        total = len(files)
        relpath = subrelpath(self)
        self.ui.progress(_('archiving (%s)') % relpath, 0,
                         unit=_('files'), total=total)
        for i, name in enumerate(files):
            flags = self.fileflags(name)
            # derive unix permissions/symlink-ness from the flags string
            mode = 'x' in flags and 0o755 or 0o644
            symlink = 'l' in flags
            archiver.addfile(prefix + self._path + '/' + name,
                             mode, symlink, self.filedata(name, decode))
            self.ui.progress(_('archiving (%s)') % relpath, i + 1,
                             unit=_('files'), total=total)
        self.ui.progress(_('archiving (%s)') % relpath, None)
        return total

    def walk(self, match):
        '''
        walk recursively through the directory tree, finding all files
        matched by the match function
        '''

    def forget(self, match, prefix):
        """return (forgotten, rejected) path lists; empty by default"""
        return ([], [])

    def removefiles(self, matcher, prefix, after, force, subrepos, warnings):
        """remove the matched files from the subrepository and the filesystem,
        possibly by force and/or after the file has been removed from the
        filesystem. Return 0 on success, 1 on any warning.
        """
        warnings.append(_("warning: removefiles not implemented (%s)")
                        % self._path)
        return 1

    def revert(self, substate, *pats, **opts):
        self.ui.warn(_('%s: reverting %s subrepos is unsupported\n') \
            % (substate[0], substate[2]))
        return []

    def shortid(self, revid):
        """return a short form of the revision identifier"""
        return revid

    def unshare(self):
        '''
        convert this repository from shared to normal storage.
        '''

    def verify(self):
        '''verify the integrity of the repository. Return 0 on success or
        warning, 1 on any error.
        '''
        return 0

    @propertycache
    def wvfs(self):
        """return vfs to access the working directory of this subrepository
        """
        return vfsmod.vfs(self._ctx.repo().wvfs.join(self._path))

    @propertycache
    def _relpath(self):
        """return path to this subrepository as seen from outermost repository
        """
        return self.wvfs.reljoin(reporelpath(self._ctx.repo()), self._path)
393 393
394 394 class hgsubrepo(abstractsubrepo):
    def __init__(self, ctx, path, state, allowcreate):
        super(hgsubrepo, self).__init__(ctx, path)
        self._state = state
        r = ctx.repo()
        root = r.wjoin(path)
        # only create a fresh repo when allowed and no .hg exists yet
        create = allowcreate and not r.wvfs.exists('%s/.hg' % path)
        self._repo = hg.repository(r.baseui, root, create=create)

        # Propagate the parent's --hidden option
        if r is r.unfiltered():
            self._repo = self._repo.unfiltered()

        self.ui = self._repo.ui
        # forward selected parent config items into the subrepo's ui
        for s, k in [('ui', 'commitsubrepos')]:
            v = r.ui.config(s, k)
            if v:
                self.ui.setconfig(s, k, v, 'subrepo')
        # internal config: ui._usedassubrepo
        self.ui.setconfig('ui', '_usedassubrepo', 'True', 'subrepo')
        self._initrepo(r, state[0], create)
415 415
    @annotatesubrepoerror
    def addwebdirpath(self, serverpath, webconf):
        # see abstractsubrepo.addwebdirpath for the argument contract
        cmdutil.addwebdirpath(self._repo, subrelpath(self), webconf)
419 419
    def storeclean(self, path):
        # lock so the store hash is computed against a consistent snapshot
        with self._repo.lock():
            return self._storeclean(path)
423 423
424 424 def _storeclean(self, path):
425 425 clean = True
426 426 itercache = self._calcstorehash(path)
427 427 for filehash in self._readstorehashcache(path):
428 428 if filehash != next(itercache, None):
429 429 clean = False
430 430 break
431 431 if clean:
432 432 # if not empty:
433 433 # the cached and current pull states have a different size
434 434 clean = next(itercache, None) is None
435 435 return clean
436 436
    def _calcstorehash(self, remotepath):
        '''calculate a unique "store hash"

        This method is used to detect when there are changes that may
        require a push to a given remote path.'''
        # sort the files that will be hashed in increasing (likely) file size
        filelist = ('bookmarks', 'store/phaseroots', 'store/00changelog.i')
        yield '# %s\n' % _expandedabspath(remotepath)
        vfs = self._repo.vfs
        for relname in filelist:
            filehash = node.hex(hashlib.sha1(vfs.tryread(relname)).digest())
            yield '%s = %s\n' % (relname, filehash)
449 449
    @propertycache
    def _cachestorehashvfs(self):
        # vfs rooted at .hg/cache/storehash, one cache file per remote path
        return vfsmod.vfs(self._repo.vfs.join('cache/storehash'))
453 453
    def _readstorehashcache(self, remotepath):
        '''read the store hash cache for a given remote repository'''
        cachefile = _getstorehashcachename(remotepath)
        # tryreadlines returns [] if the cache file does not exist yet
        return self._cachestorehashvfs.tryreadlines(cachefile, 'r')
458 458
    def _cachestorehash(self, remotepath):
        '''cache the current store hash

        Each remote repo requires its own store hash cache, because a subrepo
        store may be "clean" versus a given remote repo, but not versus another
        '''
        cachefile = _getstorehashcachename(remotepath)
        with self._repo.lock():
            storehash = list(self._calcstorehash(remotepath))
            vfs = self._cachestorehashvfs
            vfs.writelines(cachefile, storehash, mode='wb', notindexed=True)
470 470
471 471 def _getctx(self):
472 472 '''fetch the context for this subrepo revision, possibly a workingctx
473 473 '''
474 474 if self._ctx.rev() is None:
475 475 return self._repo[None] # workingctx if parent is workingctx
476 476 else:
477 477 rev = self._state[1]
478 478 return self._repo[rev]
479 479
    @annotatesubrepoerror
    def _initrepo(self, parentrepo, source, create):
        """Link this repo to its parent and, when newly created, write an
        hgrc seeding the default/default-push paths from the parent."""
        self._repo._subparent = parentrepo
        self._repo._subsource = source

        if create:
            lines = ['[paths]\n']

            def addpathconfig(key, value):
                # skip empty values so we don't write bogus path entries
                if value:
                    lines.append('%s = %s\n' % (key, value))
                    self.ui.setconfig('paths', key, value, 'subrepo')

            defpath = _abssource(self._repo, abort=False)
            defpushpath = _abssource(self._repo, True, abort=False)
            addpathconfig('default', defpath)
            if defpath != defpushpath:
                addpathconfig('default-push', defpushpath)

            self._repo.vfs.write('hgrc', util.tonativeeol(''.join(lines)))
500 500
    @annotatesubrepoerror
    def add(self, ui, match, prefix, explicitonly, **opts):
        # delegate to cmdutil.add with this subrepo's path joined to prefix
        return cmdutil.add(ui, self._repo, match,
                           self.wvfs.reljoin(prefix, self._path),
                           explicitonly, **opts)
506 506
    @annotatesubrepoerror
    def addremove(self, m, prefix, opts, dry_run, similarity):
        # In the same way as sub directories are processed, once in a subrepo,
        # always entry any of its subrepos. Don't corrupt the options that will
        # be used to process sibling subrepos however.
        opts = copy.copy(opts)
        opts['subrepos'] = True
        return scmutil.addremove(self._repo, m,
                                 self.wvfs.reljoin(prefix, self._path), opts,
                                 dry_run, similarity)
517 517
    @annotatesubrepoerror
    def cat(self, match, fm, fntemplate, prefix, **opts):
        # cat files at the revision recorded in .hgsubstate
        rev = self._state[1]
        ctx = self._repo[rev]
        return cmdutil.cat(self.ui, self._repo, ctx, match, fm, fntemplate,
                           prefix, **opts)
524 524
    @annotatesubrepoerror
    def status(self, rev2, **opts):
        """status between the recorded state revision and ``rev2``"""
        try:
            rev1 = self._state[1]
            ctx1 = self._repo[rev1]
            ctx2 = self._repo[rev2]
            return self._repo.status(ctx1, ctx2, **opts)
        except error.RepoLookupError as inst:
            # a missing revision is reported, not fatal: return empty status
            self.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
                         % (inst, subrelpath(self)))
            return scmutil.status([], [], [], [], [], [], [])
536 536
    @annotatesubrepoerror
    def diff(self, ui, diffopts, node2, match, prefix, **opts):
        try:
            node1 = node.bin(self._state[1])
            # We currently expect node2 to come from substate and be
            # in hex format
            if node2 is not None:
                node2 = node.bin(node2)
            logcmdutil.diffordiffstat(ui, self._repo, diffopts,
                                      node1, node2, match,
                                      prefix=posixpath.join(prefix, self._path),
                                      listsubrepos=True, **opts)
        except error.RepoLookupError as inst:
            # missing revisions are warned about rather than aborting the diff
            self.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
                         % (inst, subrelpath(self)))
552 552
    @annotatesubrepoerror
    def archive(self, archiver, prefix, match=None, decode=True):
        """archive this subrepo and, recursively, its nested subrepos"""
        # make sure the recorded revision is present locally first
        self._get(self._state + ('hg',))
        files = self.files()
        if match:
            files = [f for f in files if match(f)]
        rev = self._state[1]
        ctx = self._repo[rev]
        scmutil.fileprefetchhooks(self._repo, ctx, files)
        total = abstractsubrepo.archive(self, archiver, prefix, match)
        # recurse into any subrepos nested below this one
        for subpath in ctx.substate:
            s = subrepo(ctx, subpath, True)
            submatch = matchmod.subdirmatcher(subpath, match)
            total += s.archive(archiver, prefix + self._path + '/', submatch,
                               decode)
        return total
569 569
    @annotatesubrepoerror
    def dirty(self, ignoreupdate=False, missing=False):
        """see abstractsubrepo.dirty for the parameter semantics"""
        r = self._state[1]
        if r == '' and not ignoreupdate: # no state recorded
            return True
        w = self._repo[None]
        if r != w.p1().hex() and not ignoreupdate:
            # different version checked out
            return True
        return w.dirty(missing=missing) # working directory changed
580 580
    def basestate(self):
        # hex of the working directory's first parent, ignoring local edits
        return self._repo['.'].hex()
583 583
    def checknested(self, path):
        """check if path is a subrepository within this subrepo"""
        return self._repo._checknested(self._repo.wjoin(path))
586 586
    @annotatesubrepoerror
    def commit(self, text, user, date):
        # don't bother committing in the subrepo if it's only been
        # updated
        if not self.dirty(True):
            return self._repo['.'].hex()
        self.ui.debug("committing subrepo %s\n" % subrelpath(self))
        n = self._repo.commit(text, user, date)
        if not n:
            return self._repo['.'].hex() # different version checked out
        return node.hex(n)
598 598
    @annotatesubrepoerror
    def phase(self, state):
        """return the phase of revision ``state`` in this subrepo"""
        return self._repo[state].phase()
602 602
    @annotatesubrepoerror
    def remove(self):
        # we can't fully delete the repository as it may contain
        # local-only history
        self.ui.note(_('removing subrepo %s\n') % subrelpath(self))
        # so just empty the working directory by updating to null
        hg.clean(self._repo, node.nullid, False)
609 609
    def _get(self, state):
        """Make sure ``state``'s revision is available locally.

        Shares, clones or pulls from the subrepo source as needed.
        Returns True when the revision was already present, False when a
        network operation (clone/pull) was required.
        """
        source, revision, kind = state
        parentrepo = self._repo._subparent

        if revision in self._repo.unfiltered():
            # Allow shared subrepos tracked at null to setup the sharedpath
            if len(self._repo) != 0 or not parentrepo.shared():
                return True
        self._repo._subsource = source
        srcurl = _abssource(self._repo)
        other = hg.peer(self._repo, {}, srcurl)
        if len(self._repo) == 0:
            # use self._repo.vfs instead of self.wvfs to remove .hg only
            self._repo.vfs.rmtree()

            # A remote subrepo could be shared if there is a local copy
            # relative to the parent's share source.  But clone pooling doesn't
            # assemble the repos in a tree, so that can't be consistently done.
            # A simpler option is for the user to configure clone pooling, and
            # work with that.
            if parentrepo.shared() and hg.islocal(srcurl):
                self.ui.status(_('sharing subrepo %s from %s\n')
                               % (subrelpath(self), srcurl))
                shared = hg.share(self._repo._subparent.baseui,
                                  other, self._repo.root,
                                  update=False, bookmarks=False)
                self._repo = shared.local()
            else:
                # TODO: find a common place for this and this code in the
                # share.py wrap of the clone command.
                if parentrepo.shared():
                    pool = self.ui.config('share', 'pool')
                    if pool:
                        pool = util.expandpath(pool)

                        shareopts = {
                            'pool': pool,
                            'mode': self.ui.config('share', 'poolnaming'),
                        }
                else:
                    shareopts = {}

                self.ui.status(_('cloning subrepo %s from %s\n')
                               % (subrelpath(self), srcurl))
                other, cloned = hg.clone(self._repo._subparent.baseui, {},
                                         other, self._repo.root,
                                         update=False, shareopts=shareopts)
                self._repo = cloned.local()
            self._initrepo(parentrepo, source, create=True)
            self._cachestorehash(srcurl)
        else:
            self.ui.status(_('pulling subrepo %s from %s\n')
                           % (subrelpath(self), srcurl))
            cleansub = self.storeclean(srcurl)
            exchange.pull(self._repo, other)
            if cleansub:
                # keep the repo clean after pull
                self._cachestorehash(srcurl)
        return False
649 669
    @annotatesubrepoerror
    def get(self, state, overwrite=False):
        """fetch (if needed) and update the working copy to ``state``"""
        inrepo = self._get(state)
        source, revision, kind = state
        repo = self._repo
        repo.ui.debug("getting subrepo %s\n" % self._path)
        if inrepo:
            # warn (and use the unfiltered repo) if the target is hidden
            urepo = repo.unfiltered()
            ctx = urepo[revision]
            if ctx.hidden():
                urepo.ui.warn(
                    _('revision %s in subrepository "%s" is hidden\n') \
                    % (revision[0:12], self._path))
                repo = urepo
        hg.updaterepo(repo, revision, overwrite)
665 685
    @annotatesubrepoerror
    def merge(self, state):
        """merge the saved subrepo state with the new ``state``,
        prompting the user when local changes would be affected"""
        self._get(state)
        cur = self._repo['.']
        dst = self._repo[state[1]]
        anc = dst.ancestor(cur)

        def mergefunc():
            # fast-forward update, no-op, or real merge depending on ancestry
            if anc == cur and dst.branch() == cur.branch():
                self.ui.debug('updating subrepository "%s"\n'
                              % subrelpath(self))
                hg.update(self._repo, state[1])
            elif anc == dst:
                self.ui.debug('skipping subrepository "%s"\n'
                              % subrelpath(self))
            else:
                self.ui.debug('merging subrepository "%s"\n' % subrelpath(self))
                hg.merge(self._repo, state[1], remind=False)

        wctx = self._repo[None]
        if self.dirty():
            if anc != dst:
                # local changes and a real target: ask before proceeding
                if _updateprompt(self.ui, self, wctx.dirty(), cur, dst):
                    mergefunc()
            else:
                mergefunc()
        else:
            mergefunc()
694 714
    @annotatesubrepoerror
    def push(self, opts):
        """push this subrepo (and, depth-first, its own subrepos)

        Returns the exchange push result, None when the store was already
        clean versus the destination, or False when a nested push failed.
        """
        force = opts.get('force')
        newbranch = opts.get('new_branch')
        ssh = opts.get('ssh')

        # push subrepos depth-first for coherent ordering
        c = self._repo['']
        subs = c.substate # only repos that are committed
        for s in sorted(subs):
            if c.sub(s).push(opts) == 0:
                return False

        dsturl = _abssource(self._repo, True)
        if not force:
            # skip the push entirely when nothing changed since the last one
            if self.storeclean(dsturl):
                self.ui.status(
                    _('no changes made to subrepo %s since last push to %s\n')
                    % (subrelpath(self), dsturl))
                return None
        self.ui.status(_('pushing subrepo %s to %s\n') %
                       (subrelpath(self), dsturl))
        other = hg.peer(self._repo, {'ssh': ssh}, dsturl)
        res = exchange.push(self._repo, other, force, newbranch=newbranch)

        # the repo is now clean
        self._cachestorehash(dsturl)
        return res.cgresult
723 743
    @annotatesubrepoerror
    def outgoing(self, ui, dest, opts):
        # rev/branch options refer to the parent repo; strip them here
        if 'rev' in opts or 'branch' in opts:
            opts = copy.copy(opts)
            opts.pop('rev', None)
            opts.pop('branch', None)
        return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts)
731 751
732 752 @annotatesubrepoerror
733 753 def incoming(self, ui, source, opts):
734 754 if 'rev' in opts or 'branch' in opts:
735 755 opts = copy.copy(opts)
736 756 opts.pop('rev', None)
737 757 opts.pop('branch', None)
738 758 return hg.incoming(ui, self._repo, _abssource(self._repo, False), opts)
739 759
740 760 @annotatesubrepoerror
741 761 def files(self):
742 762 rev = self._state[1]
743 763 ctx = self._repo[rev]
744 764 return ctx.manifest().keys()
745 765
746 766 def filedata(self, name, decode):
747 767 rev = self._state[1]
748 768 data = self._repo[rev][name].data()
749 769 if decode:
750 770 data = self._repo.wwritedata(name, data)
751 771 return data
752 772
753 773 def fileflags(self, name):
754 774 rev = self._state[1]
755 775 ctx = self._repo[rev]
756 776 return ctx.flags(name)
757 777
758 778 @annotatesubrepoerror
759 779 def printfiles(self, ui, m, fm, fmt, subrepos):
760 780 # If the parent context is a workingctx, use the workingctx here for
761 781 # consistency.
762 782 if self._ctx.rev() is None:
763 783 ctx = self._repo[None]
764 784 else:
765 785 rev = self._state[1]
766 786 ctx = self._repo[rev]
767 787 return cmdutil.files(ui, ctx, m, fm, fmt, subrepos)
768 788
769 789 @annotatesubrepoerror
770 790 def getfileset(self, expr):
771 791 if self._ctx.rev() is None:
772 792 ctx = self._repo[None]
773 793 else:
774 794 rev = self._state[1]
775 795 ctx = self._repo[rev]
776 796
777 797 files = ctx.getfileset(expr)
778 798
779 799 for subpath in ctx.substate:
780 800 sub = ctx.sub(subpath)
781 801
782 802 try:
783 803 files.extend(subpath + '/' + f for f in sub.getfileset(expr))
784 804 except error.LookupError:
785 805 self.ui.status(_("skipping missing subrepository: %s\n")
786 806 % self.wvfs.reljoin(reporelpath(self), subpath))
787 807 return files
788 808
789 809 def walk(self, match):
790 810 ctx = self._repo[None]
791 811 return ctx.walk(match)
792 812
793 813 @annotatesubrepoerror
794 814 def forget(self, match, prefix):
795 815 return cmdutil.forget(self.ui, self._repo, match,
796 816 self.wvfs.reljoin(prefix, self._path), True)
797 817
798 818 @annotatesubrepoerror
799 819 def removefiles(self, matcher, prefix, after, force, subrepos, warnings):
800 820 return cmdutil.remove(self.ui, self._repo, matcher,
801 821 self.wvfs.reljoin(prefix, self._path),
802 822 after, force, subrepos)
803 823
    @annotatesubrepoerror
    def revert(self, substate, *pats, **opts):
        """Revert the subrepo to the revision recorded in *substate*.

        Reverting a subrepo is a 2 step process:
        1. if no_backup is not set, revert all modified files inside
           the subrepo (backups are created by cmdutil.revert)
        2. update the subrepo to the revision specified in the
           corresponding substate dictionary
        """
        self.ui.status(_('reverting subrepo %s\n') % substate[0])
        if not opts.get(r'no_backup'):
            # Revert all files on the subrepo, creating backups
            # Note that this will not recursively revert subrepos
            # We could do it if there was a set:subrepos() predicate
            opts = opts.copy()
            # pin the revert to the recorded revision and clear any date
            opts[r'date'] = None
            opts[r'rev'] = substate[1]

            self.filerevert(*pats, **opts)

        # Update the repo to the revision specified in the given substate
        if not opts.get(r'dry_run'):
            self.get(substate, overwrite=True)
825 845
826 846 def filerevert(self, *pats, **opts):
827 847 ctx = self._repo[opts[r'rev']]
828 848 parents = self._repo.dirstate.parents()
829 849 if opts.get(r'all'):
830 850 pats = ['set:modified()']
831 851 else:
832 852 pats = []
833 853 cmdutil.revert(self.ui, self._repo, ctx, parents, *pats, **opts)
834 854
835 855 def shortid(self, revid):
836 856 return revid[:12]
837 857
    @annotatesubrepoerror
    def unshare(self):
        """Convert a shared subrepo into a full standalone repository."""
        # subrepo inherently violates our import layering rules
        # because it wants to make repo objects from deep inside the stack
        # so we manually delay the circular imports to not break
        # scripts that don't use our demand-loading
        global hg
        from . import hg as h
        hg = h

        # Nothing prevents a user from sharing in a repo, and then making that a
        # subrepo. Alternately, the previous unshare attempt may have failed
        # part way through. So recurse whether or not this layer is shared.
        if self._repo.shared():
            self.ui.status(_("unsharing subrepo '%s'\n") % self._relpath)

        # hg.unshare itself recurses into nested subrepos
        hg.unshare(self.ui, self._repo)
855 875
    def verify(self):
        """Warn about hidden or missing subrepo revisions.

        Always returns 0; problems are reported as warnings only, since a
        missing revision may simply need to be pulled.
        """
        try:
            rev = self._state[1]
            ctx = self._repo.unfiltered()[rev]
            if ctx.hidden():
                # Since hidden revisions aren't pushed/pulled, it seems worth an
                # explicit warning.
                ui = self._repo.ui
                ui.warn(_("subrepo '%s' is hidden in revision %s\n") %
                        (self._relpath, node.short(self._ctx.node())))
            return 0
        except error.RepoLookupError:
            # A missing subrepo revision may be a case of needing to pull it, so
            # don't treat this as an error.
            self._repo.ui.warn(_("subrepo '%s' not found in revision %s\n") %
                               (self._relpath, node.short(self._ctx.node())))
            return 0
873 893
874 894 @propertycache
875 895 def wvfs(self):
876 896 """return own wvfs for efficiency and consistency
877 897 """
878 898 return self._repo.wvfs
879 899
880 900 @propertycache
881 901 def _relpath(self):
882 902 """return path to this subrepository as seen from outermost repository
883 903 """
884 904 # Keep consistent dir separators by avoiding vfs.join(self._path)
885 905 return reporelpath(self._repo)
886 906
class svnsubrepo(abstractsubrepo):
    """Subrepository backed by a Subversion working copy.

    All operations shell out to the 'svn' command-line client, whose
    presence is checked at construction time.
    """
    def __init__(self, ctx, path, state, allowcreate):
        super(svnsubrepo, self).__init__(ctx, path)
        self._state = state
        self._exe = util.findexe('svn')
        if not self._exe:
            raise error.Abort(_("'svn' executable not found for subrepo '%s'")
                              % self._path)

    def _svncommand(self, commands, filename='', failok=False):
        """Run an svn command for this working copy and return
        (stdout, stderr).

        A path built from *filename* is appended unless filename is None.
        Unless *failok* is set, a non-zero exit aborts and any stderr
        output is echoed as a warning.
        """
        cmd = [self._exe]
        extrakw = {}
        if not self.ui.interactive():
            # Making stdin be a pipe should prevent svn from behaving
            # interactively even if we can't pass --non-interactive.
            extrakw[r'stdin'] = subprocess.PIPE
            # Starting in svn 1.5 --non-interactive is a global flag
            # instead of being per-command, but we need to support 1.4 so
            # we have to be intelligent about what commands take
            # --non-interactive.
            if commands[0] in ('update', 'checkout', 'commit'):
                cmd.append('--non-interactive')
        cmd.extend(commands)
        if filename is not None:
            path = self.wvfs.reljoin(self._ctx.repo().origroot,
                                     self._path, filename)
            cmd.append(path)
        env = dict(encoding.environ)
        # Avoid localized output, preserve current locale for everything else.
        lc_all = env.get('LC_ALL')
        if lc_all:
            env['LANG'] = lc_all
            del env['LC_ALL']
        env['LC_MESSAGES'] = 'C'
        p = subprocess.Popen(cmd, bufsize=-1, close_fds=util.closefds,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             universal_newlines=True, env=env, **extrakw)
        stdout, stderr = p.communicate()
        stderr = stderr.strip()
        if not failok:
            if p.returncode:
                raise error.Abort(stderr or 'exited with code %d'
                                  % p.returncode)
            if stderr:
                self.ui.warn(stderr + '\n')
        return stdout, stderr

    @propertycache
    def _svnversion(self):
        # (major, minor) of the svn client, parsed from `svn --version`
        output, err = self._svncommand(['--version', '--quiet'], filename=None)
        m = re.search(br'^(\d+)\.(\d+)', output)
        if not m:
            raise error.Abort(_('cannot retrieve svn tool version'))
        return (int(m.group(1)), int(m.group(2)))

    def _svnmissing(self):
        # True when the working copy has not been checked out yet
        return not self.wvfs.exists('.svn')

    def _wcrevs(self):
        # Get the working directory revision as well as the last
        # commit revision so we can compare the subrepo state with
        # both. We used to store the working directory one.
        output, err = self._svncommand(['info', '--xml'])
        doc = xml.dom.minidom.parseString(output)
        entries = doc.getElementsByTagName('entry')
        lastrev, rev = '0', '0'
        if entries:
            rev = str(entries[0].getAttribute('revision')) or '0'
            commits = entries[0].getElementsByTagName('commit')
            if commits:
                lastrev = str(commits[0].getAttribute('revision')) or '0'
        return (lastrev, rev)

    def _wcrev(self):
        # last committed revision of the working copy
        return self._wcrevs()[0]

    def _wcchanged(self):
        """Return (changes, extchanges, missing) where changes is True
        if the working directory was changed, extchanges is
        True if any of these changes concern an external entry and missing
        is True if any change is a missing entry.
        """
        output, err = self._svncommand(['status', '--xml'])
        externals, changes, missing = [], [], []
        doc = xml.dom.minidom.parseString(output)
        for e in doc.getElementsByTagName('entry'):
            s = e.getElementsByTagName('wc-status')
            if not s:
                continue
            item = s[0].getAttribute('item')
            props = s[0].getAttribute('props')
            path = e.getAttribute('path')
            if item == 'external':
                externals.append(path)
            elif item == 'missing':
                missing.append(path)
            if (item not in ('', 'normal', 'unversioned', 'external')
                or props not in ('', 'none', 'normal')):
                changes.append(path)
        # a change under an external's path counts as an external change
        for path in changes:
            for ext in externals:
                if path == ext or path.startswith(ext + pycompat.ossep):
                    return True, True, bool(missing)
        return bool(changes), False, bool(missing)

    @annotatesubrepoerror
    def dirty(self, ignoreupdate=False, missing=False):
        """Report whether the working copy differs from the recorded state.

        *ignoreupdate* only forgives being at a different revision;
        content changes always count as dirty.  With *missing*, missing
        working-copy entries count as changes too.
        """
        if self._svnmissing():
            # never checked out: dirty only if a revision is recorded
            return self._state[1] != ''
        wcchanged = self._wcchanged()
        changed = wcchanged[0] or (missing and wcchanged[2])
        if not changed:
            if self._state[1] in self._wcrevs() or ignoreupdate:
                return False
        return True

    def basestate(self):
        """State to record for this subrepo: the last committed revision
        when the URL still exists there, else the working copy revision."""
        lastrev, rev = self._wcrevs()
        if lastrev != rev:
            # Last committed rev is not the same than rev. We would
            # like to take lastrev but we do not know if the subrepo
            # URL exists at lastrev. Test it and fallback to rev it
            # is not there.
            try:
                self._svncommand(['list', '%s@%s' % (self._state[0], lastrev)])
                return lastrev
            except error.Abort:
                pass
        return rev

    @annotatesubrepoerror
    def commit(self, text, user, date):
        """Commit working-copy changes; returns the new revision.

        user and date are out of our hands since svn is centralized.
        """
        changed, extchanged, missing = self._wcchanged()
        if not changed:
            return self.basestate()
        if extchanged:
            # Do not try to commit externals
            raise error.Abort(_('cannot commit svn externals'))
        if missing:
            # svn can commit with missing entries but aborting like hg
            # seems a better approach.
            raise error.Abort(_('cannot commit missing svn entries'))
        commitinfo, err = self._svncommand(['commit', '-m', text])
        self.ui.status(commitinfo)
        newrev = re.search('Committed revision ([0-9]+).', commitinfo)
        if not newrev:
            if not commitinfo.strip():
                # Sometimes, our definition of "changed" differs from
                # svn one. For instance, svn ignores missing files
                # when committing. If there are only missing files, no
                # commit is made, no output and no error code.
                raise error.Abort(_('failed to commit svn changes'))
            raise error.Abort(commitinfo.splitlines()[-1])
        newrev = newrev.groups()[0]
        # update the working copy to the freshly committed revision
        self.ui.status(self._svncommand(['update', '-r', newrev])[0])
        return newrev

    @annotatesubrepoerror
    def remove(self):
        """Delete the working copy, refusing when it has local changes."""
        if self.dirty():
            self.ui.warn(_('not removing repo %s because '
                           'it has changes.\n') % self._path)
            return
        self.ui.note(_('removing subrepo %s\n') % self._path)

        self.wvfs.rmtree(forcibly=True)
        try:
            # best-effort cleanup of now-empty parent directories
            pwvfs = self._ctx.repo().wvfs
            pwvfs.removedirs(pwvfs.dirname(self._path))
        except OSError:
            pass

    @annotatesubrepoerror
    def get(self, state, overwrite=False):
        """Check out state[0] at revision state[1] into the working copy.

        With *overwrite*, local modifications are reverted first.  An
        obstructed-but-clean working copy for a different URL is blown
        away and checked out again.
        """
        if overwrite:
            self._svncommand(['revert', '--recursive'])
        args = ['checkout']
        if self._svnversion >= (1, 5):
            args.append('--force')
        # The revision must be specified at the end of the URL to properly
        # update to a directory which has since been deleted and recreated.
        args.append('%s@%s' % (state[0], state[1]))

        # SEC: check that the ssh url is safe
        util.checksafessh(state[0])

        status, err = self._svncommand(args, failok=True)
        _sanitize(self.ui, self.wvfs, '.svn')
        if not re.search('Checked out revision [0-9]+.', status):
            if ('is already a working copy for a different URL' in err
                and (self._wcchanged()[:2] == (False, False))):
                # obstructed but clean working copy, so just blow it away.
                self.remove()
                self.get(state, overwrite=False)
                return
            raise error.Abort((status or err).splitlines()[-1])
        self.ui.status(status)

    @annotatesubrepoerror
    def merge(self, state):
        """'Merge' by updating to state's revision (after prompting when
        the working copy is dirty); there is no local svn merge here."""
        old = self._state[1]
        new = state[1]
        wcrev = self._wcrev()
        if new != wcrev:
            dirty = old == wcrev or self._wcchanged()[0]
            if _updateprompt(self.ui, self, dirty, wcrev, new):
                self.get(state, False)

    def push(self, opts):
        # push is a no-op for SVN
        return True

    @annotatesubrepoerror
    def files(self):
        """Return all versioned files (recursively) at the checked-out URL."""
        output = self._svncommand(['list', '--recursive', '--xml'])[0]
        doc = xml.dom.minidom.parseString(output)
        paths = []
        for e in doc.getElementsByTagName('entry'):
            kind = str(e.getAttribute('kind'))
            if kind != 'file':
                continue
            name = ''.join(c.data for c
                           in e.getElementsByTagName('name')[0].childNodes
                           if c.nodeType == c.TEXT_NODE)
            paths.append(name.encode('utf-8'))
        return paths

    def filedata(self, name, decode):
        # NOTE: *decode* is ignored; svn cat output is returned verbatim
        return self._svncommand(['cat'], name)[0]
1117 1137
1118 1138
1119 1139 class gitsubrepo(abstractsubrepo):
    def __init__(self, ctx, path, state, allowcreate):
        super(gitsubrepo, self).__init__(ctx, path)
        self._state = state  # (source, revision) pair from .hgsubstate
        self._abspath = ctx.repo().wjoin(path)  # absolute checkout path
        self._subparent = ctx.repo()  # the parent (outer) repository
        # locate a usable git executable now; aborts if none is found
        self._ensuregit()
1126 1146
    def _ensuregit(self):
        """Find a runnable git executable and check its version.

        Tries 'git', then 'git.cmd' on Windows.  Aborts when git cannot
        be executed or reports a version older than 1.5; warns (but
        continues) for versions in [1.5, 1.6) or unparsable versions.
        """
        try:
            self._gitexecutable = 'git'
            out, err = self._gitnodir(['--version'])
        except OSError as e:
            genericerror = _("error executing git for subrepo '%s': %s")
            notfoundhint = _("check git is installed and in your PATH")
            if e.errno != errno.ENOENT:
                # executable exists but failed to run: report the OS error
                raise error.Abort(genericerror % (
                    self._path, encoding.strtolocal(e.strerror)))
            elif pycompat.iswindows:
                try:
                    self._gitexecutable = 'git.cmd'
                    out, err = self._gitnodir(['--version'])
                except OSError as e2:
                    if e2.errno == errno.ENOENT:
                        raise error.Abort(_("couldn't find 'git' or 'git.cmd'"
                                            " for subrepo '%s'") % self._path,
                                          hint=notfoundhint)
                    else:
                        raise error.Abort(genericerror % (self._path,
                                          encoding.strtolocal(e2.strerror)))
            else:
                raise error.Abort(_("couldn't find git for subrepo '%s'")
                                  % self._path, hint=notfoundhint)
        versionstatus = self._checkversion(out)
        if versionstatus == 'unknown':
            self.ui.warn(_('cannot retrieve git version\n'))
        elif versionstatus == 'abort':
            raise error.Abort(_('git subrepo requires at least 1.6.0 or later'))
        elif versionstatus == 'warning':
            self.ui.warn(_('git subrepo requires at least 1.6.0 or later\n'))
1159 1179
1160 1180 @staticmethod
1161 1181 def _gitversion(out):
1162 1182 m = re.search(br'^git version (\d+)\.(\d+)\.(\d+)', out)
1163 1183 if m:
1164 1184 return (int(m.group(1)), int(m.group(2)), int(m.group(3)))
1165 1185
1166 1186 m = re.search(br'^git version (\d+)\.(\d+)', out)
1167 1187 if m:
1168 1188 return (int(m.group(1)), int(m.group(2)), 0)
1169 1189
1170 1190 return -1
1171 1191
    @staticmethod
    def _checkversion(out):
        '''ensure git version is new enough

        >>> _checkversion = gitsubrepo._checkversion
        >>> _checkversion(b'git version 1.6.0')
        'ok'
        >>> _checkversion(b'git version 1.8.5')
        'ok'
        >>> _checkversion(b'git version 1.4.0')
        'abort'
        >>> _checkversion(b'git version 1.5.0')
        'warning'
        >>> _checkversion(b'git version 1.9-rc0')
        'ok'
        >>> _checkversion(b'git version 1.9.0.265.g81cdec2')
        'ok'
        >>> _checkversion(b'git version 1.9.0.GIT')
        'ok'
        >>> _checkversion(b'git version 12345')
        'unknown'
        >>> _checkversion(b'no')
        'unknown'
        '''
        version = gitsubrepo._gitversion(out)
        # git 1.4.0 can't work at all, but 1.5.X can in at least some cases,
        # despite the docstring comment. For now, error on 1.4.0, warn on
        # 1.5.0 but attempt to continue.
        # The -1 sentinel (unparsable version) must be checked before the
        # tuple comparisons below.
        if version == -1:
            return 'unknown'
        if version < (1, 5, 0):
            return 'abort'
        elif version < (1, 6, 0):
            return 'warning'
        return 'ok'
1207 1227
1208 1228 def _gitcommand(self, commands, env=None, stream=False):
1209 1229 return self._gitdir(commands, env=env, stream=stream)[0]
1210 1230
1211 1231 def _gitdir(self, commands, env=None, stream=False):
1212 1232 return self._gitnodir(commands, env=env, stream=stream,
1213 1233 cwd=self._abspath)
1214 1234
    def _gitnodir(self, commands, env=None, stream=False, cwd=None):
        """Calls the git command

        The methods tries to call the git command. versions prior to 1.6.0
        are not supported and very probably fail.

        Returns (stdout, returncode), or (stdout-pipe, None) when *stream*
        is set.  Exit codes other than 0 and 1 abort, except for commands
        whose non-zero exit is meaningful ('cat-file', 'symbolic-ref').
        """
        self.ui.debug('%s: git %s\n' % (self._relpath, ' '.join(commands)))
        if env is None:
            env = encoding.environ.copy()
        # disable localization for Git output (issue5176)
        env['LC_ALL'] = 'C'
        # fix for Git CVE-2015-7545
        if 'GIT_ALLOW_PROTOCOL' not in env:
            env['GIT_ALLOW_PROTOCOL'] = 'file:git:http:https:ssh'
        # unless ui.quiet is set, print git's stderr,
        # which is mostly progress and useful info
        errpipe = None
        if self.ui.quiet:
            # NOTE(review): errpipe is never explicitly closed; its fd is
            # reclaimed when the object is garbage-collected
            errpipe = open(os.devnull, 'w')
        if self.ui._colormode and len(commands) and commands[0] == "diff":
            # insert the argument in the front,
            # the end of git diff arguments is used for paths
            commands.insert(1, '--color')
        p = subprocess.Popen([self._gitexecutable] + commands, bufsize=-1,
                             cwd=cwd, env=env, close_fds=util.closefds,
                             stdout=subprocess.PIPE, stderr=errpipe)
        if stream:
            return p.stdout, None

        retdata = p.stdout.read().strip()
        # wait for the child to exit to avoid race condition.
        p.wait()

        if p.returncode != 0 and p.returncode != 1:
            # there are certain error codes that are ok
            command = commands[0]
            if command in ('cat-file', 'symbolic-ref'):
                return retdata, p.returncode
            # for all others, abort
            raise error.Abort(_('git %s error %d in %s') %
                              (command, p.returncode, self._relpath))

        return retdata, p.returncode
1258 1278
1259 1279 def _gitmissing(self):
1260 1280 return not self.wvfs.exists('.git')
1261 1281
1262 1282 def _gitstate(self):
1263 1283 return self._gitcommand(['rev-parse', 'HEAD'])
1264 1284
1265 1285 def _gitcurrentbranch(self):
1266 1286 current, err = self._gitdir(['symbolic-ref', 'HEAD', '--quiet'])
1267 1287 if err:
1268 1288 current = None
1269 1289 return current
1270 1290
1271 1291 def _gitremote(self, remote):
1272 1292 out = self._gitcommand(['remote', 'show', '-n', remote])
1273 1293 line = out.split('\n')[1]
1274 1294 i = line.index('URL: ') + len('URL: ')
1275 1295 return line[i:]
1276 1296
1277 1297 def _githavelocally(self, revision):
1278 1298 out, code = self._gitdir(['cat-file', '-e', revision])
1279 1299 return code == 0
1280 1300
1281 1301 def _gitisancestor(self, r1, r2):
1282 1302 base = self._gitcommand(['merge-base', r1, r2])
1283 1303 return base == r1
1284 1304
1285 1305 def _gitisbare(self):
1286 1306 return self._gitcommand(['config', '--bool', 'core.bare']) == 'true'
1287 1307
    def _gitupdatestat(self):
        """Refresh git's cached stat information.

        This must be run before git diff-index: diff-index only looks at
        changes to file stat; this command looks at file contents and
        updates the stat."""
        self._gitcommand(['update-index', '-q', '--refresh'])
1293 1313
1294 1314 def _gitbranchmap(self):
1295 1315 '''returns 2 things:
1296 1316 a map from git branch to revision
1297 1317 a map from revision to branches'''
1298 1318 branch2rev = {}
1299 1319 rev2branch = {}
1300 1320
1301 1321 out = self._gitcommand(['for-each-ref', '--format',
1302 1322 '%(objectname) %(refname)'])
1303 1323 for line in out.split('\n'):
1304 1324 revision, ref = line.split(' ')
1305 1325 if (not ref.startswith('refs/heads/') and
1306 1326 not ref.startswith('refs/remotes/')):
1307 1327 continue
1308 1328 if ref.startswith('refs/remotes/') and ref.endswith('/HEAD'):
1309 1329 continue # ignore remote/HEAD redirects
1310 1330 branch2rev[ref] = revision
1311 1331 rev2branch.setdefault(revision, []).append(ref)
1312 1332 return branch2rev, rev2branch
1313 1333
    def _gittracking(self, branches):
        'return map of remote branch to local tracking branch'
        # assumes no more than one local tracking branch for each remote
        tracking = {}
        for b in branches:
            # only local branches can track a remote branch
            if b.startswith('refs/remotes/'):
                continue
            # strip the 'refs/heads/' prefix to get the short branch name
            bname = b.split('/', 2)[2]
            remote = self._gitcommand(['config', 'branch.%s.remote' % bname])
            if remote:
                ref = self._gitcommand(['config', 'branch.%s.merge' % bname])
                tracking['refs/remotes/%s/%s' %
                         (remote, ref.split('/', 2)[2])] = b
        return tracking
1328 1348
    def _abssource(self, source):
        """Turn a possibly-relative subrepo *source* into an absolute URL."""
        if '://' not in source:
            # recognize the scp syntax as an absolute source
            colon = source.find(':')
            if colon != -1 and '/' not in source[:colon]:
                return source
        # the module-level _abssource() reads the source back off this object
        self._subsource = source
        return _abssource(self)
1337 1357
    def _fetch(self, source, revision):
        """Ensure *revision* is available locally, cloning or fetching
        from *source* as needed; aborts when it still cannot be found."""
        if self._gitmissing():
            # SEC: check for safe ssh url
            util.checksafessh(source)

            source = self._abssource(source)
            self.ui.status(_('cloning subrepo %s from %s\n') %
                           (self._relpath, source))
            self._gitnodir(['clone', source, self._abspath])
        if self._githavelocally(revision):
            return
        self.ui.status(_('pulling subrepo %s from %s\n') %
                       (self._relpath, self._gitremote('origin')))
        # try only origin: the originally cloned repo
        self._gitcommand(['fetch'])
        if not self._githavelocally(revision):
            raise error.Abort(_('revision %s does not exist in subrepository '
                                '"%s"\n') % (revision, self._relpath))
1356 1376
    @annotatesubrepoerror
    def dirty(self, ignoreupdate=False, missing=False):
        """Report whether the checkout differs from the recorded state.

        A missing checkout is dirty only when a revision is recorded; a
        bare repo is always dirty.  With *ignoreupdate*, being checked
        out at a different revision is not by itself dirty.  The
        *missing* argument is not used by the git implementation.
        """
        if self._gitmissing():
            return self._state[1] != ''
        if self._gitisbare():
            return True
        if not ignoreupdate and self._state[1] != self._gitstate():
            # different version checked out
            return True
        # check for staged changes or modified files; ignore untracked files
        self._gitupdatestat()
        out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
        return code == 1
1370 1390
1371 1391 def basestate(self):
1372 1392 return self._gitstate()
1373 1393
    @annotatesubrepoerror
    def get(self, state, overwrite=False):
        """Update the checkout to the revision in *state*.

        With *overwrite*, staged and local changes are discarded first.
        Prefers checking out a branch pointing at the revision (master
        first, then any local branch, then a tracked remote branch),
        falling back to a detached checkout.  An empty revision removes
        the checkout entirely.
        """
        source, revision, kind = state
        if not revision:
            self.remove()
            return
        self._fetch(source, revision)
        # if the repo was set to be bare, unbare it
        if self._gitisbare():
            self._gitcommand(['config', 'core.bare', 'false'])
            if self._gitstate() == revision:
                self._gitcommand(['reset', '--hard', 'HEAD'])
                return
        elif self._gitstate() == revision:
            if overwrite:
                # first reset the index to unmark new files for commit, because
                # reset --hard will otherwise throw away files added for commit,
                # not just unmark them.
                self._gitcommand(['reset', 'HEAD'])
                self._gitcommand(['reset', '--hard', 'HEAD'])
            return
        branch2rev, rev2branch = self._gitbranchmap()

        def checkout(args):
            # run 'git checkout', forcing when overwrite is requested
            cmd = ['checkout']
            if overwrite:
                # first reset the index to unmark new files for commit, because
                # the -f option will otherwise throw away files added for
                # commit, not just unmark them.
                self._gitcommand(['reset', 'HEAD'])
                cmd.append('-f')
            self._gitcommand(cmd + args)
            _sanitize(self.ui, self.wvfs, '.git')

        def rawcheckout():
            # no branch to checkout, check it out with no branch
            self.ui.warn(_('checking out detached HEAD in '
                           'subrepository "%s"\n') % self._relpath)
            self.ui.warn(_('check out a git branch if you intend '
                           'to make changes\n'))
            checkout(['-q', revision])

        if revision not in rev2branch:
            rawcheckout()
            return
        branches = rev2branch[revision]
        firstlocalbranch = None
        for b in branches:
            if b == 'refs/heads/master':
                # master trumps all other branches
                checkout(['refs/heads/master'])
                return
            if not firstlocalbranch and not b.startswith('refs/remotes/'):
                firstlocalbranch = b
        if firstlocalbranch:
            checkout([firstlocalbranch])
            return

        tracking = self._gittracking(branch2rev.keys())
        # choose a remote branch already tracked if possible
        remote = branches[0]
        if remote not in tracking:
            for b in branches:
                if b in tracking:
                    remote = b
                    break

        if remote not in tracking:
            # create a new local tracking branch
            local = remote.split('/', 3)[3]
            checkout(['-b', local, remote])
        elif self._gitisancestor(branch2rev[tracking[remote]], remote):
            # When updating to a tracked remote branch,
            # if the local tracking branch is downstream of it,
            # a normal `git pull` would have performed a "fast-forward merge"
            # which is equivalent to updating the local branch to the remote.
            # Since we are only looking at branching at update, we need to
            # detect this situation and perform this action lazily.
            if tracking[remote] != self._gitcurrentbranch():
                checkout([tracking[remote]])
            self._gitcommand(['merge', '--ff', remote])
            _sanitize(self.ui, self.wvfs, '.git')
        else:
            # a real merge would be required, just checkout the revision
            rawcheckout()
1459 1479
    @annotatesubrepoerror
    def commit(self, text, user, date):
        """Commit all working-directory changes; return the new HEAD."""
        if self._gitmissing():
            raise error.Abort(_("subrepo %s is missing") % self._relpath)
        cmd = ['commit', '-a', '-m', text]
        env = encoding.environ.copy()
        if user:
            cmd += ['--author', user]
        if date:
            # git's date parser silently ignores when seconds < 1e9
            # convert to ISO8601
            env['GIT_AUTHOR_DATE'] = dateutil.datestr(date,
                                                      '%Y-%m-%dT%H:%M:%S %1%2')
        self._gitcommand(cmd, env=env)
        # make sure commit works otherwise HEAD might not exist under certain
        # circumstances
        return self._gitstate()
1477 1497
    @annotatesubrepoerror
    def merge(self, state):
        """Merge the revision in *state* into the current checkout.

        Fast-forwards (via get()) when the merge base equals the incoming
        revision; prompts the user before merging when the working
        directory is dirty.
        """
        source, revision, kind = state
        self._fetch(source, revision)
        base = self._gitcommand(['merge-base', revision, self._state[1]])
        self._gitupdatestat()
        out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])

        def mergefunc():
            if base == revision:
                self.get(state) # fast forward merge
            elif base != self._state[1]:
                self._gitcommand(['merge', '--no-commit', revision])
            _sanitize(self.ui, self.wvfs, '.git')

        if self.dirty():
            if self._gitstate() != revision:
                # code != 0 means diff-index found local modifications
                dirty = self._gitstate() == self._state[1] or code != 0
                if _updateprompt(self.ui, self, dirty,
                                 self._state[1][:7], revision[:7]):
                    mergefunc()
        else:
            mergefunc()
1501 1521
    @annotatesubrepoerror
    def push(self, opts):
        """Push the recorded revision to 'origin'.

        Returns True when origin already contains the revision (or there
        is nothing recorded), False when no suitable branch is checked
        out or the branch is unrelated; otherwise pushes the current
        branch and reports git's success.
        """
        force = opts.get('force')

        if not self._state[1]:
            return True
        if self._gitmissing():
            raise error.Abort(_("subrepo %s is missing") % self._relpath)
        # if a branch in origin contains the revision, nothing to do
        branch2rev, rev2branch = self._gitbranchmap()
        if self._state[1] in rev2branch:
            for b in rev2branch[self._state[1]]:
                if b.startswith('refs/remotes/origin/'):
                    return True
        for b, revision in branch2rev.iteritems():
            if b.startswith('refs/remotes/origin/'):
                if self._gitisancestor(self._state[1], revision):
                    return True
        # otherwise, try to push the currently checked out branch
        cmd = ['push']
        if force:
            cmd.append('--force')

        current = self._gitcurrentbranch()
        if current:
            # determine if the current branch is even useful
            if not self._gitisancestor(self._state[1], current):
                self.ui.warn(_('unrelated git branch checked out '
                               'in subrepository "%s"\n') % self._relpath)
                return False
            self.ui.status(_('pushing branch %s of subrepository "%s"\n') %
                           (current.split('/', 2)[2], self._relpath))
            ret = self._gitdir(cmd + ['origin', current])
            return ret[1] == 0
        else:
            self.ui.warn(_('no branch checked out in subrepository "%s"\n'
                           'cannot push revision %s\n') %
                         (self._relpath, self._state[1]))
            return False
1541 1561
    @annotatesubrepoerror
    def add(self, ui, match, prefix, explicitonly, **opts):
        """Add matched files to the git index; return rejected files.

        Files already tracked are rejected when named explicitly (to
        mirror hg's "already tracked" warning); explicitly named ignored
        files are force-added.
        """
        if self._gitmissing():
            return []

        (modified, added, removed,
         deleted, unknown, ignored, clean) = self.status(None, unknown=True,
                                                         clean=True)

        tracked = set()
        # dirstates 'amn' warn, 'r' is added again
        for l in (modified, added, deleted, clean):
            tracked.update(l)

        # Unknown files not of interest will be rejected by the matcher
        files = unknown
        files.extend(match.files())

        rejected = []

        files = [f for f in sorted(set(files)) if match(f)]
        for f in files:
            exact = match.exact(f)
            command = ["add"]
            if exact:
                command.append("-f") #should be added, even if ignored
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(f))

            if f in tracked:  # hg prints 'adding' even if already tracked
                if exact:
                    rejected.append(f)
                continue
            if not opts.get(r'dry_run'):
                self._gitcommand(command + [f])

        for f in rejected:
            ui.warn(_("%s already tracked!\n") % match.abs(f))

        return rejected
1582 1602
1583 1603 @annotatesubrepoerror
1584 1604 def remove(self):
1585 1605 if self._gitmissing():
1586 1606 return
1587 1607 if self.dirty():
1588 1608 self.ui.warn(_('not removing repo %s because '
1589 1609 'it has changes.\n') % self._relpath)
1590 1610 return
1591 1611 # we can't fully delete the repository as it may contain
1592 1612 # local-only history
1593 1613 self.ui.note(_('removing subrepo %s\n') % self._relpath)
1594 1614 self._gitcommand(['config', 'core.bare', 'true'])
1595 1615 for f, kind in self.wvfs.readdir():
1596 1616 if f == '.git':
1597 1617 continue
1598 1618 if kind == stat.S_IFDIR:
1599 1619 self.wvfs.rmtree(f)
1600 1620 else:
1601 1621 self.wvfs.unlink(f)
1602 1622
1603 1623 def archive(self, archiver, prefix, match=None, decode=True):
1604 1624 total = 0
1605 1625 source, revision = self._state
1606 1626 if not revision:
1607 1627 return total
1608 1628 self._fetch(source, revision)
1609 1629
1610 1630 # Parse git's native archive command.
1611 1631 # This should be much faster than manually traversing the trees
1612 1632 # and objects with many subprocess calls.
1613 1633 tarstream = self._gitcommand(['archive', revision], stream=True)
1614 1634 tar = tarfile.open(fileobj=tarstream, mode='r|')
1615 1635 relpath = subrelpath(self)
1616 1636 self.ui.progress(_('archiving (%s)') % relpath, 0, unit=_('files'))
1617 1637 for i, info in enumerate(tar):
1618 1638 if info.isdir():
1619 1639 continue
1620 1640 if match and not match(info.name):
1621 1641 continue
1622 1642 if info.issym():
1623 1643 data = info.linkname
1624 1644 else:
1625 1645 data = tar.extractfile(info).read()
1626 1646 archiver.addfile(prefix + self._path + '/' + info.name,
1627 1647 info.mode, info.issym(), data)
1628 1648 total += 1
1629 1649 self.ui.progress(_('archiving (%s)') % relpath, i + 1,
1630 1650 unit=_('files'))
1631 1651 self.ui.progress(_('archiving (%s)') % relpath, None)
1632 1652 return total
1633 1653
1634 1654
1635 1655 @annotatesubrepoerror
1636 1656 def cat(self, match, fm, fntemplate, prefix, **opts):
1637 1657 rev = self._state[1]
1638 1658 if match.anypats():
1639 1659 return 1 #No support for include/exclude yet
1640 1660
1641 1661 if not match.files():
1642 1662 return 1
1643 1663
1644 1664 # TODO: add support for non-plain formatter (see cmdutil.cat())
1645 1665 for f in match.files():
1646 1666 output = self._gitcommand(["show", "%s:%s" % (rev, f)])
1647 1667 fp = cmdutil.makefileobj(self._ctx, fntemplate,
1648 1668 pathname=self.wvfs.reljoin(prefix, f))
1649 1669 fp.write(output)
1650 1670 fp.close()
1651 1671 return 0
1652 1672
1653 1673
1654 1674 @annotatesubrepoerror
1655 1675 def status(self, rev2, **opts):
1656 1676 rev1 = self._state[1]
1657 1677 if self._gitmissing() or not rev1:
1658 1678 # if the repo is missing, return no results
1659 1679 return scmutil.status([], [], [], [], [], [], [])
1660 1680 modified, added, removed = [], [], []
1661 1681 self._gitupdatestat()
1662 1682 if rev2:
1663 1683 command = ['diff-tree', '--no-renames', '-r', rev1, rev2]
1664 1684 else:
1665 1685 command = ['diff-index', '--no-renames', rev1]
1666 1686 out = self._gitcommand(command)
1667 1687 for line in out.split('\n'):
1668 1688 tab = line.find('\t')
1669 1689 if tab == -1:
1670 1690 continue
1671 1691 status, f = line[tab - 1], line[tab + 1:]
1672 1692 if status == 'M':
1673 1693 modified.append(f)
1674 1694 elif status == 'A':
1675 1695 added.append(f)
1676 1696 elif status == 'D':
1677 1697 removed.append(f)
1678 1698
1679 1699 deleted, unknown, ignored, clean = [], [], [], []
1680 1700
1681 1701 command = ['status', '--porcelain', '-z']
1682 1702 if opts.get(r'unknown'):
1683 1703 command += ['--untracked-files=all']
1684 1704 if opts.get(r'ignored'):
1685 1705 command += ['--ignored']
1686 1706 out = self._gitcommand(command)
1687 1707
1688 1708 changedfiles = set()
1689 1709 changedfiles.update(modified)
1690 1710 changedfiles.update(added)
1691 1711 changedfiles.update(removed)
1692 1712 for line in out.split('\0'):
1693 1713 if not line:
1694 1714 continue
1695 1715 st = line[0:2]
1696 1716 #moves and copies show 2 files on one line
1697 1717 if line.find('\0') >= 0:
1698 1718 filename1, filename2 = line[3:].split('\0')
1699 1719 else:
1700 1720 filename1 = line[3:]
1701 1721 filename2 = None
1702 1722
1703 1723 changedfiles.add(filename1)
1704 1724 if filename2:
1705 1725 changedfiles.add(filename2)
1706 1726
1707 1727 if st == '??':
1708 1728 unknown.append(filename1)
1709 1729 elif st == '!!':
1710 1730 ignored.append(filename1)
1711 1731
1712 1732 if opts.get(r'clean'):
1713 1733 out = self._gitcommand(['ls-files'])
1714 1734 for f in out.split('\n'):
1715 1735 if not f in changedfiles:
1716 1736 clean.append(f)
1717 1737
1718 1738 return scmutil.status(modified, added, removed, deleted,
1719 1739 unknown, ignored, clean)
1720 1740
1721 1741 @annotatesubrepoerror
1722 1742 def diff(self, ui, diffopts, node2, match, prefix, **opts):
1723 1743 node1 = self._state[1]
1724 1744 cmd = ['diff', '--no-renames']
1725 1745 if opts[r'stat']:
1726 1746 cmd.append('--stat')
1727 1747 else:
1728 1748 # for Git, this also implies '-p'
1729 1749 cmd.append('-U%d' % diffopts.context)
1730 1750
1731 1751 gitprefix = self.wvfs.reljoin(prefix, self._path)
1732 1752
1733 1753 if diffopts.noprefix:
1734 1754 cmd.extend(['--src-prefix=%s/' % gitprefix,
1735 1755 '--dst-prefix=%s/' % gitprefix])
1736 1756 else:
1737 1757 cmd.extend(['--src-prefix=a/%s/' % gitprefix,
1738 1758 '--dst-prefix=b/%s/' % gitprefix])
1739 1759
1740 1760 if diffopts.ignorews:
1741 1761 cmd.append('--ignore-all-space')
1742 1762 if diffopts.ignorewsamount:
1743 1763 cmd.append('--ignore-space-change')
1744 1764 if self._gitversion(self._gitcommand(['--version'])) >= (1, 8, 4) \
1745 1765 and diffopts.ignoreblanklines:
1746 1766 cmd.append('--ignore-blank-lines')
1747 1767
1748 1768 cmd.append(node1)
1749 1769 if node2:
1750 1770 cmd.append(node2)
1751 1771
1752 1772 output = ""
1753 1773 if match.always():
1754 1774 output += self._gitcommand(cmd) + '\n'
1755 1775 else:
1756 1776 st = self.status(node2)[:3]
1757 1777 files = [f for sublist in st for f in sublist]
1758 1778 for f in files:
1759 1779 if match(f):
1760 1780 output += self._gitcommand(cmd + ['--', f]) + '\n'
1761 1781
1762 1782 if output.strip():
1763 1783 ui.write(output)
1764 1784
1765 1785 @annotatesubrepoerror
1766 1786 def revert(self, substate, *pats, **opts):
1767 1787 self.ui.status(_('reverting subrepo %s\n') % substate[0])
1768 1788 if not opts.get(r'no_backup'):
1769 1789 status = self.status(None)
1770 1790 names = status.modified
1771 1791 for name in names:
1772 1792 bakname = scmutil.origpath(self.ui, self._subparent, name)
1773 1793 self.ui.note(_('saving current version of %s as %s\n') %
1774 1794 (name, bakname))
1775 1795 self.wvfs.rename(name, bakname)
1776 1796
1777 1797 if not opts.get(r'dry_run'):
1778 1798 self.get(substate, overwrite=True)
1779 1799 return []
1780 1800
1781 1801 def shortid(self, revid):
1782 1802 return revid[:7]
1783 1803
1784 1804 types = {
1785 1805 'hg': hgsubrepo,
1786 1806 'svn': svnsubrepo,
1787 1807 'git': gitsubrepo,
1788 1808 }
@@ -1,1003 +1,1035
1 1 $ HGMERGE=true; export HGMERGE
2 2
3 3 init
4 4
5 5 $ hg init repo
6 6 $ cd repo
7 7
8 8 commit
9 9
10 10 $ echo 'a' > a
11 11 $ hg ci -A -m test -u nobody -d '1 0'
12 12 adding a
13 13
14 14 annotate -c
15 15
16 16 $ hg annotate -c a
17 17 8435f90966e4: a
18 18
19 19 annotate -cl
20 20
21 21 $ hg annotate -cl a
22 22 8435f90966e4:1: a
23 23
24 24 annotate -d
25 25
26 26 $ hg annotate -d a
27 27 Thu Jan 01 00:00:01 1970 +0000: a
28 28
29 29 annotate -n
30 30
31 31 $ hg annotate -n a
32 32 0: a
33 33
34 34 annotate -nl
35 35
36 36 $ hg annotate -nl a
37 37 0:1: a
38 38
39 39 annotate -u
40 40
41 41 $ hg annotate -u a
42 42 nobody: a
43 43
44 44 annotate -cdnu
45 45
46 46 $ hg annotate -cdnu a
47 47 nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000: a
48 48
49 49 annotate -cdnul
50 50
51 51 $ hg annotate -cdnul a
52 52 nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000:1: a
53 53
54 54 annotate (JSON)
55 55
56 56 $ hg annotate -Tjson a
57 57 [
58 58 {
59 59 "abspath": "a",
60 60 "lines": [{"line": "a\n", "rev": 0}],
61 61 "path": "a"
62 62 }
63 63 ]
64 64
65 65 $ hg annotate -Tjson -cdfnul a
66 66 [
67 67 {
68 68 "abspath": "a",
69 69 "lines": [{"date": [1.0, 0], "file": "a", "line": "a\n", "line_number": 1, "node": "8435f90966e442695d2ded29fdade2bac5ad8065", "rev": 0, "user": "nobody"}],
70 70 "path": "a"
71 71 }
72 72 ]
73 73
74 74 $ cat <<EOF >>a
75 75 > a
76 76 > a
77 77 > EOF
78 78 $ hg ci -ma1 -d '1 0'
79 79 $ hg cp a b
80 80 $ hg ci -mb -d '1 0'
81 81 $ cat <<EOF >> b
82 82 > b4
83 83 > b5
84 84 > b6
85 85 > EOF
86 86 $ hg ci -mb2 -d '2 0'
87 87
88 88 annotate multiple files (JSON)
89 89
90 90 $ hg annotate -Tjson a b
91 91 [
92 92 {
93 93 "abspath": "a",
94 94 "lines": [{"line": "a\n", "rev": 0}, {"line": "a\n", "rev": 1}, {"line": "a\n", "rev": 1}],
95 95 "path": "a"
96 96 },
97 97 {
98 98 "abspath": "b",
99 99 "lines": [{"line": "a\n", "rev": 0}, {"line": "a\n", "rev": 1}, {"line": "a\n", "rev": 1}, {"line": "b4\n", "rev": 3}, {"line": "b5\n", "rev": 3}, {"line": "b6\n", "rev": 3}],
100 100 "path": "b"
101 101 }
102 102 ]
103 103
104 104 annotate multiple files (template)
105 105
106 106 $ hg annotate -T'== {abspath} ==\n{lines % "{rev}: {line}"}' a b
107 107 == a ==
108 108 0: a
109 109 1: a
110 110 1: a
111 111 == b ==
112 112 0: a
113 113 1: a
114 114 1: a
115 115 3: b4
116 116 3: b5
117 117 3: b6
118 118
119 119 annotate -n b
120 120
121 121 $ hg annotate -n b
122 122 0: a
123 123 1: a
124 124 1: a
125 125 3: b4
126 126 3: b5
127 127 3: b6
128 128
129 129 annotate --no-follow b
130 130
131 131 $ hg annotate --no-follow b
132 132 2: a
133 133 2: a
134 134 2: a
135 135 3: b4
136 136 3: b5
137 137 3: b6
138 138
139 139 annotate -nl b
140 140
141 141 $ hg annotate -nl b
142 142 0:1: a
143 143 1:2: a
144 144 1:3: a
145 145 3:4: b4
146 146 3:5: b5
147 147 3:6: b6
148 148
149 149 annotate -nf b
150 150
151 151 $ hg annotate -nf b
152 152 0 a: a
153 153 1 a: a
154 154 1 a: a
155 155 3 b: b4
156 156 3 b: b5
157 157 3 b: b6
158 158
159 159 annotate -nlf b
160 160
161 161 $ hg annotate -nlf b
162 162 0 a:1: a
163 163 1 a:2: a
164 164 1 a:3: a
165 165 3 b:4: b4
166 166 3 b:5: b5
167 167 3 b:6: b6
168 168
169 169 $ hg up -C 2
170 170 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
171 171 $ cat <<EOF >> b
172 172 > b4
173 173 > c
174 174 > b5
175 175 > EOF
176 176 $ hg ci -mb2.1 -d '2 0'
177 177 created new head
178 178 $ hg merge
179 179 merging b
180 180 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
181 181 (branch merge, don't forget to commit)
182 182 $ hg ci -mmergeb -d '3 0'
183 183
184 184 annotate after merge
185 185
186 186 $ hg annotate -nf b
187 187 0 a: a
188 188 1 a: a
189 189 1 a: a
190 190 3 b: b4
191 191 4 b: c
192 192 3 b: b5
193 193
194 194 annotate after merge with -l
195 195
196 196 $ hg annotate -nlf b
197 197 0 a:1: a
198 198 1 a:2: a
199 199 1 a:3: a
200 200 3 b:4: b4
201 201 4 b:5: c
202 202 3 b:5: b5
203 203
204 204 $ hg up -C 1
205 205 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
206 206 $ hg cp a b
207 207 $ cat <<EOF > b
208 208 > a
209 209 > z
210 210 > a
211 211 > EOF
212 212 $ hg ci -mc -d '3 0'
213 213 created new head
214 214 $ hg merge
215 215 merging b
216 216 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
217 217 (branch merge, don't forget to commit)
218 218 $ cat <<EOF >> b
219 219 > b4
220 220 > c
221 221 > b5
222 222 > EOF
223 223 $ echo d >> b
224 224 $ hg ci -mmerge2 -d '4 0'
225 225
226 226 annotate after rename merge
227 227
228 228 $ hg annotate -nf b
229 229 0 a: a
230 230 6 b: z
231 231 1 a: a
232 232 3 b: b4
233 233 4 b: c
234 234 3 b: b5
235 235 7 b: d
236 236
237 237 annotate after rename merge with -l
238 238
239 239 $ hg annotate -nlf b
240 240 0 a:1: a
241 241 6 b:2: z
242 242 1 a:3: a
243 243 3 b:4: b4
244 244 4 b:5: c
245 245 3 b:5: b5
246 246 7 b:7: d
247 247
248 248 --skip nothing (should be the same as no --skip at all)
249 249
250 250 $ hg annotate -nlf b --skip '1::0'
251 251 0 a:1: a
252 252 6 b:2: z
253 253 1 a:3: a
254 254 3 b:4: b4
255 255 4 b:5: c
256 256 3 b:5: b5
257 257 7 b:7: d
258 258
259 259 --skip a modified line. Note a slight behavior difference in pure - this is
260 260 because the pure code comes up with slightly different deltas internally.
261 261
262 262 $ hg annotate -nlf b --skip 6
263 263 0 a:1: a
264 264 1 a:2* z (no-pure !)
265 265 0 a:1* z (pure !)
266 266 1 a:3: a
267 267 3 b:4: b4
268 268 4 b:5: c
269 269 3 b:5: b5
270 270 7 b:7: d
271 271
272 272 --skip added lines (and test multiple skip)
273 273
274 274 $ hg annotate -nlf b --skip 3
275 275 0 a:1: a
276 276 6 b:2: z
277 277 1 a:3: a
278 278 1 a:3* b4
279 279 4 b:5: c
280 280 1 a:3* b5
281 281 7 b:7: d
282 282
283 283 $ hg annotate -nlf b --skip 4
284 284 0 a:1: a
285 285 6 b:2: z
286 286 1 a:3: a
287 287 3 b:4: b4
288 288 1 a:3* c
289 289 3 b:5: b5
290 290 7 b:7: d
291 291
292 292 $ hg annotate -nlf b --skip 3 --skip 4
293 293 0 a:1: a
294 294 6 b:2: z
295 295 1 a:3: a
296 296 1 a:3* b4
297 297 1 a:3* c
298 298 1 a:3* b5
299 299 7 b:7: d
300 300
301 301 $ hg annotate -nlf b --skip 'merge()'
302 302 0 a:1: a
303 303 6 b:2: z
304 304 1 a:3: a
305 305 3 b:4: b4
306 306 4 b:5: c
307 307 3 b:5: b5
308 308 3 b:5* d
309 309
310 310 --skip everything -- use the revision the file was introduced in
311 311
312 312 $ hg annotate -nlf b --skip 'all()'
313 313 0 a:1: a
314 314 0 a:1* z
315 315 0 a:1* a
316 316 0 a:1* b4
317 317 0 a:1* c
318 318 0 a:1* b5
319 319 0 a:1* d
320 320
321 321 Issue2807: alignment of line numbers with -l
322 322
323 323 $ echo more >> b
324 324 $ hg ci -mmore -d '5 0'
325 325 $ echo more >> b
326 326 $ hg ci -mmore -d '6 0'
327 327 $ echo more >> b
328 328 $ hg ci -mmore -d '7 0'
329 329 $ hg annotate -nlf b
330 330 0 a: 1: a
331 331 6 b: 2: z
332 332 1 a: 3: a
333 333 3 b: 4: b4
334 334 4 b: 5: c
335 335 3 b: 5: b5
336 336 7 b: 7: d
337 337 8 b: 8: more
338 338 9 b: 9: more
339 339 10 b:10: more
340 340
341 341 linkrev vs rev
342 342
343 343 $ hg annotate -r tip -n a
344 344 0: a
345 345 1: a
346 346 1: a
347 347
348 348 linkrev vs rev with -l
349 349
350 350 $ hg annotate -r tip -nl a
351 351 0:1: a
352 352 1:2: a
353 353 1:3: a
354 354
355 355 Issue589: "undelete" sequence leads to crash
356 356
357 357 annotate was crashing when trying to --follow something
358 358
359 359 like A -> B -> A
360 360
361 361 generate ABA rename configuration
362 362
363 363 $ echo foo > foo
364 364 $ hg add foo
365 365 $ hg ci -m addfoo
366 366 $ hg rename foo bar
367 367 $ hg ci -m renamefoo
368 368 $ hg rename bar foo
369 369 $ hg ci -m renamebar
370 370
371 371 annotate after ABA with follow
372 372
373 373 $ hg annotate --follow foo
374 374 foo: foo
375 375
376 376 missing file
377 377
378 378 $ hg ann nosuchfile
379 379 abort: nosuchfile: no such file in rev e9e6b4fa872f
380 380 [255]
381 381
382 382 annotate file without '\n' on last line
383 383
384 384 $ printf "" > c
385 385 $ hg ci -A -m test -u nobody -d '1 0'
386 386 adding c
387 387 $ hg annotate c
388 388 $ printf "a\nb" > c
389 389 $ hg ci -m test
390 390 $ hg annotate c
391 391 [0-9]+: a (re)
392 392 [0-9]+: b (re)
393 393
394 394 Issue3841: check annotation of the file of which filelog includes
395 395 merging between the revision and its ancestor
396 396
397 397 to reproduce the situation with recent Mercurial, this script uses (1)
398 398 "hg debugsetparents" to merge without ancestor check by "hg merge",
399 399 and (2) the extension to allow filelog merging between the revision
400 400 and its ancestor by overriding "repo._filecommit".
401 401
402 402 $ cat > ../legacyrepo.py <<EOF
403 403 > from __future__ import absolute_import
404 404 > from mercurial import error, node
405 405 > def reposetup(ui, repo):
406 406 > class legacyrepo(repo.__class__):
407 407 > def _filecommit(self, fctx, manifest1, manifest2,
408 408 > linkrev, tr, changelist):
409 409 > fname = fctx.path()
410 410 > text = fctx.data()
411 411 > flog = self.file(fname)
412 412 > fparent1 = manifest1.get(fname, node.nullid)
413 413 > fparent2 = manifest2.get(fname, node.nullid)
414 414 > meta = {}
415 415 > copy = fctx.renamed()
416 416 > if copy and copy[0] != fname:
417 417 > raise error.Abort('copying is not supported')
418 418 > if fparent2 != node.nullid:
419 419 > changelist.append(fname)
420 420 > return flog.add(text, meta, tr, linkrev,
421 421 > fparent1, fparent2)
422 422 > raise error.Abort('only merging is supported')
423 423 > repo.__class__ = legacyrepo
424 424 > EOF
425 425
426 426 $ cat > baz <<EOF
427 427 > 1
428 428 > 2
429 429 > 3
430 430 > 4
431 431 > 5
432 432 > EOF
433 433 $ hg add baz
434 434 $ hg commit -m "baz:0"
435 435
436 436 $ cat > baz <<EOF
437 437 > 1 baz:1
438 438 > 2
439 439 > 3
440 440 > 4
441 441 > 5
442 442 > EOF
443 443 $ hg commit -m "baz:1"
444 444
445 445 $ cat > baz <<EOF
446 446 > 1 baz:1
447 447 > 2 baz:2
448 448 > 3
449 449 > 4
450 450 > 5
451 451 > EOF
452 452 $ hg debugsetparents 17 17
453 453 $ hg --config extensions.legacyrepo=../legacyrepo.py commit -m "baz:2"
454 454 $ hg debugindexdot .hg/store/data/baz.i
455 455 digraph G {
456 456 -1 -> 0
457 457 0 -> 1
458 458 1 -> 2
459 459 1 -> 2
460 460 }
461 461 $ hg annotate baz
462 462 17: 1 baz:1
463 463 18: 2 baz:2
464 464 16: 3
465 465 16: 4
466 466 16: 5
467 467
468 468 $ cat > baz <<EOF
469 469 > 1 baz:1
470 470 > 2 baz:2
471 471 > 3 baz:3
472 472 > 4
473 473 > 5
474 474 > EOF
475 475 $ hg commit -m "baz:3"
476 476
477 477 $ cat > baz <<EOF
478 478 > 1 baz:1
479 479 > 2 baz:2
480 480 > 3 baz:3
481 481 > 4 baz:4
482 482 > 5
483 483 > EOF
484 484 $ hg debugsetparents 19 18
485 485 $ hg --config extensions.legacyrepo=../legacyrepo.py commit -m "baz:4"
486 486 $ hg debugindexdot .hg/store/data/baz.i
487 487 digraph G {
488 488 -1 -> 0
489 489 0 -> 1
490 490 1 -> 2
491 491 1 -> 2
492 492 2 -> 3
493 493 3 -> 4
494 494 2 -> 4
495 495 }
496 496 $ hg annotate baz
497 497 17: 1 baz:1
498 498 18: 2 baz:2
499 499 19: 3 baz:3
500 500 20: 4 baz:4
501 501 16: 5
502 502
503 503 annotate clean file
504 504
505 505 $ hg annotate -ncr "wdir()" foo
506 506 11 472b18db256d : foo
507 507
508 508 annotate modified file
509 509
510 510 $ echo foofoo >> foo
511 511 $ hg annotate -r "wdir()" foo
512 512 11 : foo
513 513 20+: foofoo
514 514
515 515 $ hg annotate -cr "wdir()" foo
516 516 472b18db256d : foo
517 517 b6bedd5477e7+: foofoo
518 518
519 519 $ hg annotate -ncr "wdir()" foo
520 520 11 472b18db256d : foo
521 521 20 b6bedd5477e7+: foofoo
522 522
523 523 $ hg annotate --debug -ncr "wdir()" foo
524 524 11 472b18db256d1e8282064eab4bfdaf48cbfe83cd : foo
525 525 20 b6bedd5477e797f25e568a6402d4697f3f895a72+: foofoo
526 526
527 527 $ hg annotate -udr "wdir()" foo
528 528 test Thu Jan 01 00:00:00 1970 +0000: foo
529 529 test [A-Za-z0-9:+ ]+: foofoo (re)
530 530
531 531 $ hg annotate -ncr "wdir()" -Tjson foo
532 532 [
533 533 {
534 534 "abspath": "foo",
535 535 "lines": [{"line": "foo\n", "node": "472b18db256d1e8282064eab4bfdaf48cbfe83cd", "rev": 11}, {"line": "foofoo\n", "node": null, "rev": null}],
536 536 "path": "foo"
537 537 }
538 538 ]
539 539
540 540 annotate added file
541 541
542 542 $ echo bar > bar
543 543 $ hg add bar
544 544 $ hg annotate -ncr "wdir()" bar
545 545 20 b6bedd5477e7+: bar
546 546
547 547 annotate renamed file
548 548
549 549 $ hg rename foo renamefoo2
550 550 $ hg annotate -ncr "wdir()" renamefoo2
551 551 11 472b18db256d : foo
552 552 20 b6bedd5477e7+: foofoo
553 553
554 554 annotate missing file
555 555
556 556 $ rm baz
557 557
558 558 $ hg annotate -ncr "wdir()" baz
559 559 abort: $TESTTMP\repo\baz: $ENOENT$ (windows !)
560 560 abort: $ENOENT$: $TESTTMP/repo/baz (no-windows !)
561 561 [255]
562 562
563 563 annotate removed file
564 564
565 565 $ hg rm baz
566 566
567 567 $ hg annotate -ncr "wdir()" baz
568 568 abort: $TESTTMP\repo\baz: $ENOENT$ (windows !)
569 569 abort: $ENOENT$: $TESTTMP/repo/baz (no-windows !)
570 570 [255]
571 571
572 572 $ hg revert --all --no-backup --quiet
573 573 $ hg id -n
574 574 20
575 575
576 576 Test followlines() revset; we usually check both followlines(pat, range) and
577 577 followlines(pat, range, descend=True) to make sure both give the same result
578 578 when they should.
579 579
580 580 $ echo a >> foo
581 581 $ hg ci -m 'foo: add a'
582 582 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5)'
583 583 16: baz:0
584 584 19: baz:3
585 585 20: baz:4
586 586 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=20)'
587 587 16: baz:0
588 588 19: baz:3
589 589 20: baz:4
590 590 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=19)'
591 591 16: baz:0
592 592 19: baz:3
593 593 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=19, descend=True)'
594 594 19: baz:3
595 595 20: baz:4
596 596 $ printf "0\n0\n" | cat - baz > baz1
597 597 $ mv baz1 baz
598 598 $ hg ci -m 'added two lines with 0'
599 599 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7)'
600 600 16: baz:0
601 601 19: baz:3
602 602 20: baz:4
603 603 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, descend=true, startrev=19)'
604 604 19: baz:3
605 605 20: baz:4
606 606 $ echo 6 >> baz
607 607 $ hg ci -m 'added line 8'
608 608 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7)'
609 609 16: baz:0
610 610 19: baz:3
611 611 20: baz:4
612 612 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=19, descend=1)'
613 613 19: baz:3
614 614 20: baz:4
615 615 $ sed 's/3/3+/' baz > baz.new
616 616 $ mv baz.new baz
617 617 $ hg ci -m 'baz:3->3+'
618 618 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7, descend=0)'
619 619 16: baz:0
620 620 19: baz:3
621 621 20: baz:4
622 622 24: baz:3->3+
623 623 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=17, descend=True)'
624 624 19: baz:3
625 625 20: baz:4
626 626 24: baz:3->3+
627 627 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 1:2, descend=false)'
628 628 22: added two lines with 0
629 629
630 630 file patterns are okay
631 631 $ hg log -T '{rev}: {desc}\n' -r 'followlines("path:baz", 1:2)'
632 632 22: added two lines with 0
633 633
634 634 renames are followed
635 635 $ hg mv baz qux
636 636 $ sed 's/4/4+/' qux > qux.new
637 637 $ mv qux.new qux
638 638 $ hg ci -m 'qux:4->4+'
639 639 $ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5:7)'
640 640 16: baz:0
641 641 19: baz:3
642 642 20: baz:4
643 643 24: baz:3->3+
644 644 25: qux:4->4+
645 645
646 646 but are missed when following children
647 647 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7, startrev=22, descend=True)'
648 648 24: baz:3->3+
649 649
650 650 merge
651 651 $ hg up 24 --quiet
652 652 $ echo 7 >> baz
653 653 $ hg ci -m 'one more line, out of line range'
654 654 created new head
655 655 $ sed 's/3+/3-/' baz > baz.new
656 656 $ mv baz.new baz
657 657 $ hg ci -m 'baz:3+->3-'
658 658 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7)'
659 659 16: baz:0
660 660 19: baz:3
661 661 20: baz:4
662 662 24: baz:3->3+
663 663 27: baz:3+->3-
664 664 $ hg merge 25
665 665 merging baz and qux to qux
666 666 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
667 667 (branch merge, don't forget to commit)
668 668 $ hg ci -m merge
669 669 $ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5:7)'
670 670 16: baz:0
671 671 19: baz:3
672 672 20: baz:4
673 673 24: baz:3->3+
674 674 25: qux:4->4+
675 675 27: baz:3+->3-
676 676 28: merge
677 677 $ hg up 25 --quiet
678 678 $ hg merge 27
679 679 merging qux and baz to qux
680 680 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
681 681 (branch merge, don't forget to commit)
682 682 $ hg ci -m 'merge from other side'
683 683 created new head
684 684 $ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5:7)'
685 685 16: baz:0
686 686 19: baz:3
687 687 20: baz:4
688 688 24: baz:3->3+
689 689 25: qux:4->4+
690 690 27: baz:3+->3-
691 691 29: merge from other side
692 692 $ hg up 24 --quiet
693 693
694 694 we are missing the branch with rename when following children
695 695 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7, startrev=26, descend=True)'
696 696 27: baz:3+->3-
697 697
698 698 we follow all branches in descending direction
699 699 $ hg up 23 --quiet
700 700 $ sed 's/3/+3/' baz > baz.new
701 701 $ mv baz.new baz
702 702 $ hg ci -m 'baz:3->+3'
703 703 created new head
704 704 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 2:5, startrev=16, descend=True)' --graph
705 705 @ 30: baz:3->+3
706 706 :
707 707 : o 27: baz:3+->3-
708 708 : :
709 709 : o 24: baz:3->3+
710 710 :/
711 711 o 20: baz:4
712 712 |\
713 713 | o 19: baz:3
714 714 |/
715 715 o 18: baz:2
716 716 :
717 717 o 16: baz:0
718 718 |
719 719 ~
720 720
721 721 Issue5595: on a merge changeset with different line ranges depending on
 722  722 parent, be conservative and use the surrounding interval to avoid losing
723 723 track of possible further descendants in specified range.
724 724
725 725 $ hg up 23 --quiet
726 726 $ hg cat baz -r 24
727 727 0
728 728 0
729 729 1 baz:1
730 730 2 baz:2
731 731 3+ baz:3
732 732 4 baz:4
733 733 5
734 734 6
735 735 $ cat > baz << EOF
736 736 > 0
737 737 > 0
738 738 > a
739 739 > b
740 740 > 3+ baz:3
741 741 > 4 baz:4
742 742 > y
743 743 > z
744 744 > EOF
745 745 $ hg ci -m 'baz: mostly rewrite with some content from 24'
746 746 created new head
747 747 $ hg merge --tool :merge-other 24
748 748 merging baz
749 749 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
750 750 (branch merge, don't forget to commit)
751 751 $ hg ci -m 'merge forgetting about baz rewrite'
752 752 $ cat > baz << EOF
753 753 > 0
754 754 > 0
755 755 > 1 baz:1
756 756 > 2+ baz:2
757 757 > 3+ baz:3
758 758 > 4 baz:4
759 759 > 5
760 760 > 6
761 761 > EOF
762 762 $ hg ci -m 'baz: narrow change (2->2+)'
763 763 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:4, startrev=20, descend=True)' --graph
764 764 @ 33: baz: narrow change (2->2+)
765 765 |
766 766 o 32: merge forgetting about baz rewrite
767 767 |\
768 768 | o 31: baz: mostly rewrite with some content from 24
769 769 | :
770 770 | : o 30: baz:3->+3
771 771 | :/
772 772 +---o 27: baz:3+->3-
773 773 | :
774 774 o : 24: baz:3->3+
775 775 :/
776 776 o 20: baz:4
777 777 |\
778 778 ~ ~
779 779
780 780 check error cases
781 781 $ hg up 24 --quiet
782 782 $ hg log -r 'followlines()'
783 783 hg: parse error: followlines takes at least 1 positional arguments
784 784 [255]
785 785 $ hg log -r 'followlines(baz)'
786 786 hg: parse error: followlines requires a line range
787 787 [255]
788 788 $ hg log -r 'followlines(baz, 1)'
789 789 hg: parse error: followlines expects a line range
790 790 [255]
791 791 $ hg log -r 'followlines(baz, 1:2, startrev=desc("b"))'
792 792 hg: parse error: followlines expects exactly one revision
793 793 [255]
794 794 $ hg log -r 'followlines("glob:*", 1:2)'
795 795 hg: parse error: followlines expects exactly one file
796 796 [255]
797 797 $ hg log -r 'followlines(baz, 1:)'
798 798 hg: parse error: line range bounds must be integers
799 799 [255]
800 800 $ hg log -r 'followlines(baz, :1)'
801 801 hg: parse error: line range bounds must be integers
802 802 [255]
803 803 $ hg log -r 'followlines(baz, x:4)'
804 804 hg: parse error: line range bounds must be integers
805 805 [255]
806 806 $ hg log -r 'followlines(baz, 5:4)'
807 807 hg: parse error: line range must be positive
808 808 [255]
809 809 $ hg log -r 'followlines(baz, 0:4)'
810 810 hg: parse error: fromline must be strictly positive
811 811 [255]
812 812 $ hg log -r 'followlines(baz, 2:40)'
813 813 abort: line range exceeds file size
814 814 [255]
815 815 $ hg log -r 'followlines(baz, 2:4, startrev=20, descend=[1])'
816 816 hg: parse error at 43: not a prefix: [
817 817 (followlines(baz, 2:4, startrev=20, descend=[1])
818 818 ^ here)
819 819 [255]
820 820 $ hg log -r 'followlines(baz, 2:4, startrev=20, descend=a)'
821 821 hg: parse error: descend argument must be a boolean
822 822 [255]
823 823
824 824 Test empty annotate output
825 825
826 826 $ printf '\0' > binary
827 827 $ touch empty
828 828 $ hg ci -qAm 'add binary and empty files'
829 829
830 830 $ hg annotate binary empty
831 831 binary: binary file
832 832
833 833 $ hg annotate -Tjson binary empty
834 834 [
835 835 {
836 836 "abspath": "binary",
837 837 "path": "binary"
838 838 },
839 839 {
840 840 "abspath": "empty",
841 841 "lines": [],
842 842 "path": "empty"
843 843 }
844 844 ]
845 845
846 846 Test annotate with whitespace options
847 847
848 848 $ cd ..
849 849 $ hg init repo-ws
850 850 $ cd repo-ws
851 851 $ cat > a <<EOF
852 852 > aa
853 853 >
854 854 > b b
855 855 > EOF
856 856 $ hg ci -Am "adda"
857 857 adding a
858 858 $ sed 's/EOL$//g' > a <<EOF
859 859 > a a
860 860 >
861 861 > EOL
862 862 > b b
863 863 > EOF
864 864 $ hg ci -m "changea"
865 865
866 866 Annotate with no option
867 867
868 868 $ hg annotate a
869 869 1: a a
870 870 0:
871 871 1:
872 872 1: b b
873 873
874 874 Annotate with --ignore-space-change
875 875
876 876 $ hg annotate --ignore-space-change a
877 877 1: a a
878 878 1:
879 879 0:
880 880 0: b b
881 881
882 882 Annotate with --ignore-all-space
883 883
884 884 $ hg annotate --ignore-all-space a
885 885 0: a a
886 886 0:
887 887 1:
888 888 0: b b
889 889
890 890 Annotate with --ignore-blank-lines (similar to no options case)
891 891
892 892 $ hg annotate --ignore-blank-lines a
893 893 1: a a
894 894 0:
895 895 1:
896 896 1: b b
897 897
898 898 $ cd ..
899 899
900 Annotate with orphaned CR (issue5798)
901 -------------------------------------
902
903 $ hg init repo-cr
904 $ cd repo-cr
905
906 $ substcr() {
907 > sed 's/\r/[CR]/g'
908 > }
909
910 >>> with open('a', 'wb') as f:
911 ... f.write(b'0a\r0b\r\n0c\r0d\r\n0e\n0f\n0g')
912 $ hg ci -qAm0
913 >>> with open('a', 'wb') as f:
914 ... f.write(b'0a\r0b\r\n1c\r1d\r\n0e\n1f\n0g')
915 $ hg ci -m1
916
917 $ hg annotate -r0 a | substcr
918 0: 0a[CR]0b[CR]
919 0: 0c[CR]0d[CR]
920 0: 0e
921 0: 0f
922 0: 0g
923 $ hg annotate -r1 a | substcr
924 0: 0a[CR]0b[CR]
925 1: 1c[CR]1d[CR]
926 0: 0e
927 1: 1f
928 0: 0g
929
930 $ cd ..
931
900 932 Annotate with linkrev pointing to another branch
901 933 ------------------------------------------------
902 934
903 935 create history with a filerev whose linkrev points to another branch
904 936
905 937 $ hg init branchedlinkrev
906 938 $ cd branchedlinkrev
907 939 $ echo A > a
908 940 $ hg commit -Am 'contentA'
909 941 adding a
910 942 $ echo B >> a
911 943 $ hg commit -m 'contentB'
912 944 $ hg up --rev 'desc(contentA)'
913 945 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
914 946 $ echo unrelated > unrelated
915 947 $ hg commit -Am 'unrelated'
916 948 adding unrelated
917 949 created new head
918 950 $ hg graft -r 'desc(contentB)'
919 951 grafting 1:fd27c222e3e6 "contentB"
920 952 $ echo C >> a
921 953 $ hg commit -m 'contentC'
922 954 $ echo W >> a
923 955 $ hg log -G
924 956 @ changeset: 4:072f1e8df249
925 957 | tag: tip
926 958 | user: test
927 959 | date: Thu Jan 01 00:00:00 1970 +0000
928 960 | summary: contentC
929 961 |
930 962 o changeset: 3:ff38df03cc4b
931 963 | user: test
932 964 | date: Thu Jan 01 00:00:00 1970 +0000
933 965 | summary: contentB
934 966 |
935 967 o changeset: 2:62aaf3f6fc06
936 968 | parent: 0:f0932f74827e
937 969 | user: test
938 970 | date: Thu Jan 01 00:00:00 1970 +0000
939 971 | summary: unrelated
940 972 |
941 973 | o changeset: 1:fd27c222e3e6
942 974 |/ user: test
943 975 | date: Thu Jan 01 00:00:00 1970 +0000
944 976 | summary: contentB
945 977 |
946 978 o changeset: 0:f0932f74827e
947 979 user: test
948 980 date: Thu Jan 01 00:00:00 1970 +0000
949 981 summary: contentA
950 982
951 983
952 984 Annotate should list ancestor of starting revision only
953 985
954 986 $ hg annotate a
955 987 0: A
956 988 3: B
957 989 4: C
958 990
959 991 $ hg annotate a -r 'wdir()'
960 992 0 : A
961 993 3 : B
962 994 4 : C
963 995 4+: W
964 996
965 997 Even when the starting revision is the linkrev-shadowed one:
966 998
967 999 $ hg annotate a -r 3
968 1000 0: A
969 1001 3: B
970 1002
971 1003 $ cd ..
972 1004
973 1005 Issue5360: Deleted chunk in p1 of a merge changeset
974 1006
975 1007 $ hg init repo-5360
976 1008 $ cd repo-5360
977 1009 $ echo 1 > a
978 1010 $ hg commit -A a -m 1
979 1011 $ echo 2 >> a
980 1012 $ hg commit -m 2
981 1013 $ echo a > a
982 1014 $ hg commit -m a
983 1015 $ hg update '.^' -q
984 1016 $ echo 3 >> a
985 1017 $ hg commit -m 3 -q
986 1018 $ hg merge 2 -q
987 1019 $ cat > a << EOF
988 1020 > b
989 1021 > 1
990 1022 > 2
991 1023 > 3
992 1024 > a
993 1025 > EOF
994 1026 $ hg resolve --mark -q
995 1027 $ hg commit -m m
996 1028 $ hg annotate a
997 1029 4: b
998 1030 0: 1
999 1031 1: 2
1000 1032 3: 3
1001 1033 2: a
1002 1034
1003 1035 $ cd ..
@@ -1,622 +1,696
1 1 Create test repository:
2 2
3 3 $ hg init repo
4 4 $ cd repo
5 5 $ echo x1 > x.txt
6 6
7 7 $ hg init foo
8 8 $ cd foo
9 9 $ echo y1 > y.txt
10 10
11 11 $ hg init bar
12 12 $ cd bar
13 13 $ echo z1 > z.txt
14 14
15 15 $ cd ..
16 16 $ echo 'bar = bar' > .hgsub
17 17
18 18 $ cd ..
19 19 $ echo 'foo = foo' > .hgsub
20 20
21 21 Add files --- .hgsub files must go first to trigger subrepos:
22 22
23 23 $ hg add -S .hgsub
24 24 $ hg add -S foo/.hgsub
25 25 $ hg add -S foo/bar
26 26 adding foo/bar/z.txt
27 27 $ hg add -S
28 28 adding x.txt
29 29 adding foo/y.txt
30 30
31 31 Test recursive status without committing anything:
32 32
33 33 $ hg status -S
34 34 A .hgsub
35 35 A foo/.hgsub
36 36 A foo/bar/z.txt
37 37 A foo/y.txt
38 38 A x.txt
39 39
40 40 Test recursive diff without committing anything:
41 41
42 42 $ hg diff --nodates -S foo
43 43 diff -r 000000000000 foo/.hgsub
44 44 --- /dev/null
45 45 +++ b/foo/.hgsub
46 46 @@ -0,0 +1,1 @@
47 47 +bar = bar
48 48 diff -r 000000000000 foo/y.txt
49 49 --- /dev/null
50 50 +++ b/foo/y.txt
51 51 @@ -0,0 +1,1 @@
52 52 +y1
53 53 diff -r 000000000000 foo/bar/z.txt
54 54 --- /dev/null
55 55 +++ b/foo/bar/z.txt
56 56 @@ -0,0 +1,1 @@
57 57 +z1
58 58
59 59 Commits:
60 60
61 61 $ hg commit -m fails
62 62 abort: uncommitted changes in subrepository "foo"
63 63 (use --subrepos for recursive commit)
64 64 [255]
65 65
66 66 The --subrepos flag overwrite the config setting:
67 67
68 68 $ hg commit -m 0-0-0 --config ui.commitsubrepos=No --subrepos
69 69 committing subrepository foo
70 70 committing subrepository foo/bar
71 71
72 72 $ cd foo
73 73 $ echo y2 >> y.txt
74 74 $ hg commit -m 0-1-0
75 75
76 76 $ cd bar
77 77 $ echo z2 >> z.txt
78 78 $ hg commit -m 0-1-1
79 79
80 80 $ cd ..
81 81 $ hg commit -m 0-2-1
82 82
83 83 $ cd ..
84 84 $ hg commit -m 1-2-1
85 85
86 86 Change working directory:
87 87
88 88 $ echo y3 >> foo/y.txt
89 89 $ echo z3 >> foo/bar/z.txt
90 90 $ hg status -S
91 91 M foo/bar/z.txt
92 92 M foo/y.txt
93 93 $ hg diff --nodates -S
94 94 diff -r d254738c5f5e foo/y.txt
95 95 --- a/foo/y.txt
96 96 +++ b/foo/y.txt
97 97 @@ -1,2 +1,3 @@
98 98 y1
99 99 y2
100 100 +y3
101 101 diff -r 9647f22de499 foo/bar/z.txt
102 102 --- a/foo/bar/z.txt
103 103 +++ b/foo/bar/z.txt
104 104 @@ -1,2 +1,3 @@
105 105 z1
106 106 z2
107 107 +z3
108 108
109 109 Status call crossing repository boundaries:
110 110
111 111 $ hg status -S foo/bar/z.txt
112 112 M foo/bar/z.txt
113 113 $ hg status -S -I 'foo/?.txt'
114 114 M foo/y.txt
115 115 $ hg status -S -I '**/?.txt'
116 116 M foo/bar/z.txt
117 117 M foo/y.txt
118 118 $ hg diff --nodates -S -I '**/?.txt'
119 119 diff -r d254738c5f5e foo/y.txt
120 120 --- a/foo/y.txt
121 121 +++ b/foo/y.txt
122 122 @@ -1,2 +1,3 @@
123 123 y1
124 124 y2
125 125 +y3
126 126 diff -r 9647f22de499 foo/bar/z.txt
127 127 --- a/foo/bar/z.txt
128 128 +++ b/foo/bar/z.txt
129 129 @@ -1,2 +1,3 @@
130 130 z1
131 131 z2
132 132 +z3
133 133
134 134 Status from within a subdirectory:
135 135
136 136 $ mkdir dir
137 137 $ cd dir
138 138 $ echo a1 > a.txt
139 139 $ hg status -S
140 140 M foo/bar/z.txt
141 141 M foo/y.txt
142 142 ? dir/a.txt
143 143 $ hg diff --nodates -S
144 144 diff -r d254738c5f5e foo/y.txt
145 145 --- a/foo/y.txt
146 146 +++ b/foo/y.txt
147 147 @@ -1,2 +1,3 @@
148 148 y1
149 149 y2
150 150 +y3
151 151 diff -r 9647f22de499 foo/bar/z.txt
152 152 --- a/foo/bar/z.txt
153 153 +++ b/foo/bar/z.txt
154 154 @@ -1,2 +1,3 @@
155 155 z1
156 156 z2
157 157 +z3
158 158
159 159 Status with relative path:
160 160
161 161 $ hg status -S ..
162 162 M ../foo/bar/z.txt
163 163 M ../foo/y.txt
164 164 ? a.txt
165 165
166 166 XXX: filtering lfilesrepo.status() in 3.3-rc causes these files to be listed as
167 167 added instead of modified.
168 168 $ hg status -S .. --config extensions.largefiles=
169 169 M ../foo/bar/z.txt
170 170 M ../foo/y.txt
171 171 ? a.txt
172 172
173 173 $ hg diff --nodates -S ..
174 174 diff -r d254738c5f5e foo/y.txt
175 175 --- a/foo/y.txt
176 176 +++ b/foo/y.txt
177 177 @@ -1,2 +1,3 @@
178 178 y1
179 179 y2
180 180 +y3
181 181 diff -r 9647f22de499 foo/bar/z.txt
182 182 --- a/foo/bar/z.txt
183 183 +++ b/foo/bar/z.txt
184 184 @@ -1,2 +1,3 @@
185 185 z1
186 186 z2
187 187 +z3
188 188 $ cd ..
189 189
190 190 Cleanup and final commit:
191 191
192 192 $ rm -r dir
193 193 $ hg commit --subrepos -m 2-3-2
194 194 committing subrepository foo
195 195 committing subrepository foo/bar
196 196
197 197 Test explicit path commands within subrepos: add/forget
198 198 $ echo z1 > foo/bar/z2.txt
199 199 $ hg status -S
200 200 ? foo/bar/z2.txt
201 201 $ hg add foo/bar/z2.txt
202 202 $ hg status -S
203 203 A foo/bar/z2.txt
204 204 $ hg forget foo/bar/z2.txt
205 205 $ hg status -S
206 206 ? foo/bar/z2.txt
207 207 $ hg forget foo/bar/z2.txt
208 208 not removing foo/bar/z2.txt: file is already untracked
209 209 [1]
210 210 $ hg status -S
211 211 ? foo/bar/z2.txt
212 212 $ rm foo/bar/z2.txt
213 213
214 214 Log with the relationships between repo and its subrepo:
215 215
216 216 $ hg log --template '{rev}:{node|short} {desc}\n'
217 217 2:1326fa26d0c0 2-3-2
218 218 1:4b3c9ff4f66b 1-2-1
219 219 0:23376cbba0d8 0-0-0
220 220
221 221 $ hg -R foo log --template '{rev}:{node|short} {desc}\n'
222 222 3:65903cebad86 2-3-2
223 223 2:d254738c5f5e 0-2-1
224 224 1:8629ce7dcc39 0-1-0
225 225 0:af048e97ade2 0-0-0
226 226
227 227 $ hg -R foo/bar log --template '{rev}:{node|short} {desc}\n'
228 228 2:31ecbdafd357 2-3-2
229 229 1:9647f22de499 0-1-1
230 230 0:4904098473f9 0-0-0
231 231
232 232 Status between revisions:
233 233
234 234 $ hg status -S
235 235 $ hg status -S --rev 0:1
236 236 M .hgsubstate
237 237 M foo/.hgsubstate
238 238 M foo/bar/z.txt
239 239 M foo/y.txt
240 240 $ hg diff --nodates -S -I '**/?.txt' --rev 0:1
241 241 diff -r af048e97ade2 -r d254738c5f5e foo/y.txt
242 242 --- a/foo/y.txt
243 243 +++ b/foo/y.txt
244 244 @@ -1,1 +1,2 @@
245 245 y1
246 246 +y2
247 247 diff -r 4904098473f9 -r 9647f22de499 foo/bar/z.txt
248 248 --- a/foo/bar/z.txt
249 249 +++ b/foo/bar/z.txt
250 250 @@ -1,1 +1,2 @@
251 251 z1
252 252 +z2
253 253
254 254 #if serve
255 255 $ cd ..
256 256 $ hg serve -R repo --debug -S -p $HGPORT -d --pid-file=hg1.pid -E error.log -A access.log
257 257 adding = $TESTTMP/repo
258 258 adding foo = $TESTTMP/repo/foo
259 259 adding foo/bar = $TESTTMP/repo/foo/bar
260 260 listening at http://*:$HGPORT/ (bound to *:$HGPORT) (glob) (?)
261 261 adding = $TESTTMP/repo (?)
262 262 adding foo = $TESTTMP/repo/foo (?)
263 263 adding foo/bar = $TESTTMP/repo/foo/bar (?)
264 264 $ cat hg1.pid >> $DAEMON_PIDS
265 265
266 266 $ hg clone http://localhost:$HGPORT clone --config progress.disable=True
267 267 requesting all changes
268 268 adding changesets
269 269 adding manifests
270 270 adding file changes
271 271 added 3 changesets with 5 changes to 3 files
272 272 new changesets 23376cbba0d8:1326fa26d0c0
273 273 updating to branch default
274 274 cloning subrepo foo from http://localhost:$HGPORT/foo
275 275 requesting all changes
276 276 adding changesets
277 277 adding manifests
278 278 adding file changes
279 279 added 4 changesets with 7 changes to 3 files
280 280 new changesets af048e97ade2:65903cebad86
281 281 cloning subrepo foo/bar from http://localhost:$HGPORT/foo/bar
282 282 requesting all changes
283 283 adding changesets
284 284 adding manifests
285 285 adding file changes
286 286 added 3 changesets with 3 changes to 1 files
287 287 new changesets 4904098473f9:31ecbdafd357
288 288 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
289 289
290 290 $ cat clone/foo/bar/z.txt
291 291 z1
292 292 z2
293 293 z3
294 294
295 Clone pooling from a remote URL will share the top level repo and the subrepos,
296 even if they are referenced by remote URL.
297
298 $ hg --config extensions.share= --config share.pool=$TESTTMP/pool \
299 > clone http://localhost:$HGPORT shared
300 (sharing from new pooled repository 23376cbba0d87c15906bb3652584927c140907bf)
301 requesting all changes
302 adding changesets
303 adding manifests
304 adding file changes
305 added 3 changesets with 5 changes to 3 files
306 new changesets 23376cbba0d8:1326fa26d0c0
307 searching for changes
308 no changes found
309 updating working directory
310 cloning subrepo foo from http://localhost:$HGPORT/foo
311 (sharing from new pooled repository af048e97ade2e236f754f05d07013e586af0f8bf)
312 requesting all changes
313 adding changesets
314 adding manifests
315 adding file changes
316 added 4 changesets with 7 changes to 3 files
317 new changesets af048e97ade2:65903cebad86
318 searching for changes
319 no changes found
320 cloning subrepo foo/bar from http://localhost:$HGPORT/foo/bar
321 (sharing from new pooled repository 4904098473f96c900fec436dad267edd4da59fad)
322 requesting all changes
323 adding changesets
324 adding manifests
325 adding file changes
326 added 3 changesets with 3 changes to 1 files
327 new changesets 4904098473f9:31ecbdafd357
328 searching for changes
329 no changes found
330 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
331
295 332 $ cat access.log
296 333 * "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
297 334 * "GET /?cmd=batch HTTP/1.1" 200 - * (glob)
298 335 * "GET /?cmd=getbundle HTTP/1.1" 200 - * (glob)
299 336 * "GET /foo?cmd=capabilities HTTP/1.1" 200 - (glob)
300 337 * "GET /foo?cmd=batch HTTP/1.1" 200 - * (glob)
301 338 * "GET /foo?cmd=getbundle HTTP/1.1" 200 - * (glob)
302 339 * "GET /foo/bar?cmd=capabilities HTTP/1.1" 200 - (glob)
303 340 * "GET /foo/bar?cmd=batch HTTP/1.1" 200 - * (glob)
304 341 * "GET /foo/bar?cmd=getbundle HTTP/1.1" 200 - * (glob)
342 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
343 $LOCALIP - - [$LOGDATE$] "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=0 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ (glob)
344 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
345 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ (glob)
346 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=1&common=0000000000000000000000000000000000000000&heads=1326fa26d0c00d2146c63b56bb6a45149d7325ac&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ (glob)
347 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D1326fa26d0c00d2146c63b56bb6a45149d7325ac x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ (glob)
348 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=0&common=1326fa26d0c00d2146c63b56bb6a45149d7325ac&heads=1326fa26d0c00d2146c63b56bb6a45149d7325ac&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ (glob)
349 $LOCALIP - - [$LOGDATE$] "GET /foo?cmd=capabilities HTTP/1.1" 200 - (glob)
350 $LOCALIP - - [$LOGDATE$] "GET /foo?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=0 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ (glob)
351 $LOCALIP - - [$LOGDATE$] "GET /foo?cmd=capabilities HTTP/1.1" 200 - (glob)
352 $LOCALIP - - [$LOGDATE$] "GET /foo?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ (glob)
353 $LOCALIP - - [$LOGDATE$] "GET /foo?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=1&common=0000000000000000000000000000000000000000&heads=65903cebad86f1a84bd4f1134f62fa7dcb7a1c98&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ (glob)
354 $LOCALIP - - [$LOGDATE$] "GET /foo?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D65903cebad86f1a84bd4f1134f62fa7dcb7a1c98 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ (glob)
355 $LOCALIP - - [$LOGDATE$] "GET /foo?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=0&common=65903cebad86f1a84bd4f1134f62fa7dcb7a1c98&heads=65903cebad86f1a84bd4f1134f62fa7dcb7a1c98&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ (glob)
356 $LOCALIP - - [$LOGDATE$] "GET /foo/bar?cmd=capabilities HTTP/1.1" 200 - (glob)
357 $LOCALIP - - [$LOGDATE$] "GET /foo/bar?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=0 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ (glob)
358 $LOCALIP - - [$LOGDATE$] "GET /foo/bar?cmd=capabilities HTTP/1.1" 200 - (glob)
359 $LOCALIP - - [$LOGDATE$] "GET /foo/bar?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ (glob)
360 $LOCALIP - - [$LOGDATE$] "GET /foo/bar?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=1&common=0000000000000000000000000000000000000000&heads=31ecbdafd357f54b281c9bd1d681bb90de219e22&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ (glob)
361 $LOCALIP - - [$LOGDATE$] "GET /foo/bar?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D31ecbdafd357f54b281c9bd1d681bb90de219e22 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ (glob)
362 $LOCALIP - - [$LOGDATE$] "GET /foo/bar?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=0&common=31ecbdafd357f54b281c9bd1d681bb90de219e22&heads=31ecbdafd357f54b281c9bd1d681bb90de219e22&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ (glob)
305 363
306 364 $ killdaemons.py
307 365 $ rm hg1.pid error.log access.log
308 366 $ cd repo
309 367 #endif
310 368
311 369 Enable progress extension for archive tests:
312 370
313 371 $ cp $HGRCPATH $HGRCPATH.no-progress
314 372 $ cat >> $HGRCPATH <<EOF
315 373 > [progress]
316 374 > disable=False
317 375 > assume-tty = 1
318 376 > delay = 0
319 377 > # set changedelay really large so we don't see nested topics
320 378 > changedelay = 30000
321 379 > format = topic bar number
322 380 > refresh = 0
323 381 > width = 60
324 382 > EOF
325 383
326 384 Test archiving to a directory tree (the doubled lines in the output
327 385 only show up in the test output, not in real usage):
328 386
329 387 $ hg archive --subrepos ../archive
330 388 \r (no-eol) (esc)
331 389 archiving [ ] 0/3\r (no-eol) (esc)
332 390 archiving [=============> ] 1/3\r (no-eol) (esc)
333 391 archiving [===========================> ] 2/3\r (no-eol) (esc)
334 392 archiving [==========================================>] 3/3\r (no-eol) (esc)
335 393 \r (no-eol) (esc)
336 394 \r (no-eol) (esc)
337 395 archiving (foo) [ ] 0/3\r (no-eol) (esc)
338 396 archiving (foo) [===========> ] 1/3\r (no-eol) (esc)
339 397 archiving (foo) [=======================> ] 2/3\r (no-eol) (esc)
340 398 archiving (foo) [====================================>] 3/3\r (no-eol) (esc)
341 399 \r (no-eol) (esc)
342 400 \r (no-eol) (esc)
343 401 archiving (foo/bar) [ ] 0/1\r (no-eol) (esc)
344 402 archiving (foo/bar) [================================>] 1/1\r (no-eol) (esc)
345 403 \r (no-eol) (esc)
346 404 $ find ../archive | sort
347 405 ../archive
348 406 ../archive/.hg_archival.txt
349 407 ../archive/.hgsub
350 408 ../archive/.hgsubstate
351 409 ../archive/foo
352 410 ../archive/foo/.hgsub
353 411 ../archive/foo/.hgsubstate
354 412 ../archive/foo/bar
355 413 ../archive/foo/bar/z.txt
356 414 ../archive/foo/y.txt
357 415 ../archive/x.txt
358 416
359 417 Test archiving to zip file (unzip output is unstable):
360 418
361 419 $ hg archive --subrepos --prefix '.' ../archive.zip
362 420 \r (no-eol) (esc)
363 421 archiving [ ] 0/3\r (no-eol) (esc)
364 422 archiving [=============> ] 1/3\r (no-eol) (esc)
365 423 archiving [===========================> ] 2/3\r (no-eol) (esc)
366 424 archiving [==========================================>] 3/3\r (no-eol) (esc)
367 425 \r (no-eol) (esc)
368 426 \r (no-eol) (esc)
369 427 archiving (foo) [ ] 0/3\r (no-eol) (esc)
370 428 archiving (foo) [===========> ] 1/3\r (no-eol) (esc)
371 429 archiving (foo) [=======================> ] 2/3\r (no-eol) (esc)
372 430 archiving (foo) [====================================>] 3/3\r (no-eol) (esc)
373 431 \r (no-eol) (esc)
374 432 \r (no-eol) (esc)
375 433 archiving (foo/bar) [ ] 0/1\r (no-eol) (esc)
376 434 archiving (foo/bar) [================================>] 1/1\r (no-eol) (esc)
377 435 \r (no-eol) (esc)
378 436
379 437 (unzip date formating is unstable, we do not care about it and glob it out)
380 438
381 439 $ unzip -l ../archive.zip | grep -v -- ----- | egrep -v files$
382 440 Archive: ../archive.zip
383 441 Length [ ]* Date [ ]* Time [ ]* Name (re)
384 442 172 [0-9:\- ]* .hg_archival.txt (re)
385 443 10 [0-9:\- ]* .hgsub (re)
386 444 45 [0-9:\- ]* .hgsubstate (re)
387 445 3 [0-9:\- ]* x.txt (re)
388 446 10 [0-9:\- ]* foo/.hgsub (re)
389 447 45 [0-9:\- ]* foo/.hgsubstate (re)
390 448 9 [0-9:\- ]* foo/y.txt (re)
391 449 9 [0-9:\- ]* foo/bar/z.txt (re)
392 450
393 451 Test archiving a revision that references a subrepo that is not yet
394 452 cloned:
395 453
396 454 #if hardlink
397 455 $ hg clone -U . ../empty
398 456 \r (no-eol) (esc)
399 457 linking [ <=> ] 1\r (no-eol) (esc)
400 458 linking [ <=> ] 2\r (no-eol) (esc)
401 459 linking [ <=> ] 3\r (no-eol) (esc)
402 460 linking [ <=> ] 4\r (no-eol) (esc)
403 461 linking [ <=> ] 5\r (no-eol) (esc)
404 462 linking [ <=> ] 6\r (no-eol) (esc)
405 463 linking [ <=> ] 7\r (no-eol) (esc)
406 464 linking [ <=> ] 8\r (no-eol) (esc)
407 465 \r (no-eol) (esc)
408 466 #else
409 467 $ hg clone -U . ../empty
410 468 \r (no-eol) (esc)
411 469 linking [ <=> ] 1 (no-eol)
412 470 #endif
413 471
414 472 $ cd ../empty
415 473 #if hardlink
416 474 $ hg archive --subrepos -r tip --prefix './' ../archive.tar.gz
417 475 \r (no-eol) (esc)
418 476 archiving [ ] 0/3\r (no-eol) (esc)
419 477 archiving [=============> ] 1/3\r (no-eol) (esc)
420 478 archiving [===========================> ] 2/3\r (no-eol) (esc)
421 479 archiving [==========================================>] 3/3\r (no-eol) (esc)
422 480 \r (no-eol) (esc)
423 481 \r (no-eol) (esc)
424 482 linking [ <=> ] 1\r (no-eol) (esc)
425 483 linking [ <=> ] 2\r (no-eol) (esc)
426 484 linking [ <=> ] 3\r (no-eol) (esc)
427 485 linking [ <=> ] 4\r (no-eol) (esc)
428 486 linking [ <=> ] 5\r (no-eol) (esc)
429 487 linking [ <=> ] 6\r (no-eol) (esc)
430 488 linking [ <=> ] 7\r (no-eol) (esc)
431 489 linking [ <=> ] 8\r (no-eol) (esc)
432 490 \r (no-eol) (esc)
433 491 \r (no-eol) (esc)
434 492 archiving (foo) [ ] 0/3\r (no-eol) (esc)
435 493 archiving (foo) [===========> ] 1/3\r (no-eol) (esc)
436 494 archiving (foo) [=======================> ] 2/3\r (no-eol) (esc)
437 495 archiving (foo) [====================================>] 3/3\r (no-eol) (esc)
438 496 \r (no-eol) (esc)
439 497 \r (no-eol) (esc)
440 498 linking [ <=> ] 1\r (no-eol) (esc)
441 499 linking [ <=> ] 2\r (no-eol) (esc)
442 500 linking [ <=> ] 3\r (no-eol) (esc)
443 501 linking [ <=> ] 4\r (no-eol) (esc)
444 502 linking [ <=> ] 5\r (no-eol) (esc)
445 503 linking [ <=> ] 6\r (no-eol) (esc)
446 504 \r (no-eol) (esc)
447 505 \r (no-eol) (esc)
448 506 archiving (foo/bar) [ ] 0/1\r (no-eol) (esc)
449 507 archiving (foo/bar) [================================>] 1/1\r (no-eol) (esc)
450 508 \r (no-eol) (esc)
451 509 cloning subrepo foo from $TESTTMP/repo/foo
452 510 cloning subrepo foo/bar from $TESTTMP/repo/foo/bar
453 511 #else
454 512 Note there's a slight output glitch on non-hardlink systems: the last
455 513 "linking" progress topic never gets closed, leading to slight output corruption on that platform.
456 514 $ hg archive --subrepos -r tip --prefix './' ../archive.tar.gz
457 515 \r (no-eol) (esc)
458 516 archiving [ ] 0/3\r (no-eol) (esc)
459 517 archiving [=============> ] 1/3\r (no-eol) (esc)
460 518 archiving [===========================> ] 2/3\r (no-eol) (esc)
461 519 archiving [==========================================>] 3/3\r (no-eol) (esc)
462 520 \r (no-eol) (esc)
463 521 \r (no-eol) (esc)
464 522 linking [ <=> ] 1\r (no-eol) (esc)
465 523 cloning subrepo foo/bar from $TESTTMP/repo/foo/bar
466 524 #endif
467 525
468 526 Archive + subrepos uses '/' for all component separators
469 527
470 528 $ tar -tzf ../archive.tar.gz | sort
471 529 .hg_archival.txt
472 530 .hgsub
473 531 .hgsubstate
474 532 foo/.hgsub
475 533 foo/.hgsubstate
476 534 foo/bar/z.txt
477 535 foo/y.txt
478 536 x.txt
479 537
480 538 The newly cloned subrepos contain no working copy:
481 539
482 540 $ hg -R foo summary
483 541 parent: -1:000000000000 (no revision checked out)
484 542 branch: default
485 543 commit: (clean)
486 544 update: 4 new changesets (update)
487 545
546 Sharing a local repo without the locally referenced subrepo (i.e. it was never
547 updated from null), fails the same as a clone operation.
548
549 $ hg --config progress.disable=True clone -U ../empty ../empty2
550
551 $ hg --config extensions.share= --config progress.disable=True \
552 > share ../empty2 ../empty_share
553 updating working directory
554 abort: repository $TESTTMP/empty2/foo not found!
555 [255]
556
557 $ hg --config progress.disable=True clone ../empty2 ../empty_clone
558 updating to branch default
559 abort: repository $TESTTMP/empty2/foo not found!
560 [255]
561
488 562 Disable progress extension and cleanup:
489 563
490 564 $ mv $HGRCPATH.no-progress $HGRCPATH
491 565
492 566 Test archiving when there is a directory in the way for a subrepo
493 567 created by archive:
494 568
495 569 $ hg clone -U . ../almost-empty
496 570 $ cd ../almost-empty
497 571 $ mkdir foo
498 572 $ echo f > foo/f
499 573 $ hg archive --subrepos -r tip archive
500 574 cloning subrepo foo from $TESTTMP/empty/foo
501 575 abort: destination '$TESTTMP/almost-empty/foo' is not empty (in subrepository "foo")
502 576 [255]
503 577
504 578 Clone and test outgoing:
505 579
506 580 $ cd ..
507 581 $ hg clone repo repo2
508 582 updating to branch default
509 583 cloning subrepo foo from $TESTTMP/repo/foo
510 584 cloning subrepo foo/bar from $TESTTMP/repo/foo/bar
511 585 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
512 586 $ cd repo2
513 587 $ hg outgoing -S
514 588 comparing with $TESTTMP/repo
515 589 searching for changes
516 590 no changes found
517 591 comparing with $TESTTMP/repo/foo
518 592 searching for changes
519 593 no changes found
520 594 comparing with $TESTTMP/repo/foo/bar
521 595 searching for changes
522 596 no changes found
523 597 [1]
524 598
525 599 Make nested change:
526 600
527 601 $ echo y4 >> foo/y.txt
528 602 $ hg diff --nodates -S
529 603 diff -r 65903cebad86 foo/y.txt
530 604 --- a/foo/y.txt
531 605 +++ b/foo/y.txt
532 606 @@ -1,3 +1,4 @@
533 607 y1
534 608 y2
535 609 y3
536 610 +y4
537 611 $ hg commit --subrepos -m 3-4-2
538 612 committing subrepository foo
539 613 $ hg outgoing -S
540 614 comparing with $TESTTMP/repo
541 615 searching for changes
542 616 changeset: 3:2655b8ecc4ee
543 617 tag: tip
544 618 user: test
545 619 date: Thu Jan 01 00:00:00 1970 +0000
546 620 summary: 3-4-2
547 621
548 622 comparing with $TESTTMP/repo/foo
549 623 searching for changes
550 624 changeset: 4:e96193d6cb36
551 625 tag: tip
552 626 user: test
553 627 date: Thu Jan 01 00:00:00 1970 +0000
554 628 summary: 3-4-2
555 629
556 630 comparing with $TESTTMP/repo/foo/bar
557 631 searching for changes
558 632 no changes found
559 633
560 634
561 635 Switch to original repo and setup default path:
562 636
563 637 $ cd ../repo
564 638 $ echo '[paths]' >> .hg/hgrc
565 639 $ echo 'default = ../repo2' >> .hg/hgrc
566 640
567 641 Test incoming:
568 642
569 643 $ hg incoming -S
570 644 comparing with $TESTTMP/repo2
571 645 searching for changes
572 646 changeset: 3:2655b8ecc4ee
573 647 tag: tip
574 648 user: test
575 649 date: Thu Jan 01 00:00:00 1970 +0000
576 650 summary: 3-4-2
577 651
578 652 comparing with $TESTTMP/repo2/foo
579 653 searching for changes
580 654 changeset: 4:e96193d6cb36
581 655 tag: tip
582 656 user: test
583 657 date: Thu Jan 01 00:00:00 1970 +0000
584 658 summary: 3-4-2
585 659
586 660 comparing with $TESTTMP/repo2/foo/bar
587 661 searching for changes
588 662 no changes found
589 663
590 664 $ hg incoming -S --bundle incoming.hg
591 665 abort: cannot combine --bundle and --subrepos
592 666 [255]
593 667
594 668 Test missing subrepo:
595 669
596 670 $ rm -r foo
597 671 $ hg status -S
598 672 warning: error "unknown revision '65903cebad86f1a84bd4f1134f62fa7dcb7a1c98'" in subrepository "foo"
599 673
600 674 Issue2619: IndexError: list index out of range on hg add with subrepos
601 675 The subrepo must sorts after the explicit filename.
602 676
603 677 $ cd ..
604 678 $ hg init test
605 679 $ cd test
606 680 $ hg init x
607 681 $ echo abc > abc.txt
608 682 $ hg ci -Am "abc"
609 683 adding abc.txt
610 684 $ echo "x = x" >> .hgsub
611 685 $ hg add .hgsub
612 686 $ touch a x/a
613 687 $ hg add a x/a
614 688
615 689 $ hg ci -Sm "added x"
616 690 committing subrepository x
617 691 $ echo abc > x/a
618 692 $ hg revert --rev '.^' "set:subrepo('glob:x*')"
619 693 abort: subrepository 'x' does not exist in 25ac2c9b3180!
620 694 [255]
621 695
622 696 $ cd ..
@@ -1,109 +1,190
1 1 #require killdaemons
2 2
3 3 Preparing the subrepository 'sub'
4 4
5 5 $ hg init sub
6 6 $ echo sub > sub/sub
7 7 $ hg add -R sub
8 8 adding sub/sub
9 9 $ hg commit -R sub -m "sub import"
10 10
11 11 Preparing the 'main' repo which depends on the subrepo 'sub'
12 12
13 13 $ hg init main
14 14 $ echo main > main/main
15 15 $ echo "sub = ../sub" > main/.hgsub
16 16 $ hg clone sub main/sub
17 17 updating to branch default
18 18 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
19 19 $ hg add -R main
20 20 adding main/.hgsub
21 21 adding main/main
22 22 $ hg commit -R main -m "main import"
23 23
24 24 Cleaning both repositories, just as a clone -U
25 25
26 26 $ hg up -C -R sub null
27 27 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
28 28 $ hg up -C -R main null
29 29 0 files updated, 0 files merged, 3 files removed, 0 files unresolved
30 30 $ rm -rf main/sub
31 31
32 32 hide outer repo
33 33 $ hg init
34 34
35 35 Serving them both using hgweb
36 36
37 37 $ printf '[paths]\n/main = main\nsub = sub\n' > webdir.conf
38 38 $ hg serve --webdir-conf webdir.conf -a localhost -p $HGPORT \
39 39 > -A /dev/null -E /dev/null --pid-file hg.pid -d
40 40 $ cat hg.pid >> $DAEMON_PIDS
41 41
42 42 Clone main from hgweb
43 43
44 44 $ hg clone "http://localhost:$HGPORT/main" cloned
45 45 requesting all changes
46 46 adding changesets
47 47 adding manifests
48 48 adding file changes
49 49 added 1 changesets with 3 changes to 3 files
50 50 new changesets fdfeeb3e979e
51 51 updating to branch default
52 52 cloning subrepo sub from http://localhost:$HGPORT/sub
53 53 requesting all changes
54 54 adding changesets
55 55 adding manifests
56 56 adding file changes
57 57 added 1 changesets with 1 changes to 1 files
58 58 new changesets 863c1745b441
59 59 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
60 60
61 61 Checking cloned repo ids
62 62
63 63 $ hg id -R cloned
64 64 fdfeeb3e979e tip
65 65 $ hg id -R cloned/sub
66 66 863c1745b441 tip
67 67
68 68 subrepo debug for 'main' clone
69 69
70 70 $ hg debugsub -R cloned
71 71 path sub
72 72 source ../sub
73 73 revision 863c1745b441bd97a8c4a096e87793073f4fb215
74 74
75 Test sharing with a remote URL reference
76
77 $ hg init absolute_subrepo
78 $ cd absolute_subrepo
79 $ echo foo > foo.txt
80 $ hg ci -Am 'initial commit'
81 adding foo.txt
82 $ echo "sub = http://localhost:$HGPORT/sub" > .hgsub
83 $ hg ci -Am 'add absolute subrepo'
84 adding .hgsub
85 $ cd ..
86
87 Clone pooling works for local clones with a remote subrepo reference. The
88 subrepo is cloned to the pool and shared from there, so that all clones will
89 share the same subrepo.
90
91 $ hg --config extensions.share= --config share.pool=$TESTTMP/pool \
92 > clone absolute_subrepo cloned_from_abs
93 (sharing from new pooled repository 8d6a2f1e993b34b6557de0042cfe825ae12a8dae)
94 requesting all changes
95 adding changesets
96 adding manifests
97 adding file changes
98 added 2 changesets with 3 changes to 3 files
99 new changesets 8d6a2f1e993b:* (glob)
100 searching for changes
101 no changes found
102 updating working directory
103 cloning subrepo sub from http://localhost:$HGPORT/sub
104 (sharing from new pooled repository 863c1745b441bd97a8c4a096e87793073f4fb215)
105 requesting all changes
106 adding changesets
107 adding manifests
108 adding file changes
109 added 1 changesets with 1 changes to 1 files
110 new changesets 863c1745b441
111 searching for changes
112 no changes found
113 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
114
115 Vanilla sharing with a subrepo remote path reference will clone the subrepo.
116 Each share of these top level repos will end up with independent subrepo copies
117 (potentially leaving the shared parent with dangling cset references).
118
119 $ hg --config extensions.share= share absolute_subrepo shared_from_abs
120 updating working directory
121 cloning subrepo sub from http://localhost:$HGPORT/sub
122 requesting all changes
123 adding changesets
124 adding manifests
125 adding file changes
126 added 1 changesets with 1 changes to 1 files
127 new changesets 863c1745b441
128 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
129
130 $ hg --config extensions.share= share -U absolute_subrepo shared_from_abs2
131 $ hg -R shared_from_abs2 update -r tip
132 cloning subrepo sub from http://localhost:$HGPORT/sub
133 requesting all changes
134 adding changesets
135 adding manifests
136 adding file changes
137 added 1 changesets with 1 changes to 1 files
138 new changesets 863c1745b441
139 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
140
141 A parent repo without its subrepo available locally can be shared if the
142 subrepo is referenced by absolute path.
143
144 $ hg clone -U absolute_subrepo cloned_null_from_abs
145 $ hg --config extensions.share= share cloned_null_from_abs shared_from_null_abs
146 updating working directory
147 cloning subrepo sub from http://localhost:$HGPORT/sub
148 requesting all changes
149 adding changesets
150 adding manifests
151 adding file changes
152 added 1 changesets with 1 changes to 1 files
153 new changesets 863c1745b441
154 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
155
75 156 $ killdaemons.py
76 157
77 158 subrepo paths with ssh urls
78 159
79 160 $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/cloned sshclone
80 161 requesting all changes
81 162 adding changesets
82 163 adding manifests
83 164 adding file changes
84 165 added 1 changesets with 3 changes to 3 files
85 166 new changesets fdfeeb3e979e
86 167 updating to branch default
87 168 cloning subrepo sub from ssh://user@dummy/sub
88 169 requesting all changes
89 170 adding changesets
90 171 adding manifests
91 172 adding file changes
92 173 added 1 changesets with 1 changes to 1 files
93 174 new changesets 863c1745b441
94 175 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
95 176
96 177 $ hg -R sshclone push -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/`pwd`/cloned
97 178 pushing to ssh://user@dummy/$TESTTMP/cloned
98 179 pushing subrepo sub to ssh://user@dummy/$TESTTMP/sub
99 180 searching for changes
100 181 no changes found
101 182 searching for changes
102 183 no changes found
103 184 [1]
104 185
105 186 $ cat dummylog
106 187 Got arguments 1:user@dummy 2:hg -R cloned serve --stdio
107 188 Got arguments 1:user@dummy 2:hg -R sub serve --stdio
108 189 Got arguments 1:user@dummy 2:hg -R $TESTTMP/cloned serve --stdio
109 190 Got arguments 1:user@dummy 2:hg -R $TESTTMP/sub serve --stdio
General Comments 0
You need to be logged in to leave comments. Login now