##// END OF EJS Templates
verify: don't init subrepo when missing one is referenced (issue5128) (API)...
Matt Harbison -
r29021:92d37fb3 stable
parent child Browse files
Show More
@@ -1,1980 +1,1980
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 bin,
18 18 hex,
19 19 nullid,
20 20 nullrev,
21 21 short,
22 22 wdirid,
23 23 )
24 24 from . import (
25 25 encoding,
26 26 error,
27 27 fileset,
28 28 match as matchmod,
29 29 mdiff,
30 30 obsolete as obsmod,
31 31 patch,
32 32 phases,
33 33 repoview,
34 34 revlog,
35 35 scmutil,
36 36 subrepo,
37 37 util,
38 38 )
39 39
40 40 propertycache = util.propertycache
41 41
42 42 # Phony node value to stand-in for new files in some uses of
43 43 # manifests. Manifests support 21-byte hashes for nodes which are
44 44 # dirty in the working copy.
45 45 _newnode = '!' * 21
46 46
47 47 nonascii = re.compile(r'[^\x21-\x7f]').search
48 48
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        # Passing an existing context through returns it unchanged, so
        # callers may say "repo[ctx]" without creating a new wrapper.
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __str__(self):
        return short(self.node())

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts of different subclasses never compare equal, even for
        # the same revision; missing _rev means "not comparable".
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # Membership is "is this path tracked in this revision's manifest".
        return key in self._manifest

    def __getitem__(self, key):
        # ctx['path'] yields a filectx for that path at this revision.
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _manifestmatches(self, match, s):
        """generate a new manifest filtered by the match argument

        This method is for internal use only and mainly exists to provide an
        object oriented way for other contexts to customize the manifest
        generation.
        """
        return self.manifest().matches(match)

    def _matchstatus(self, other, match):
        """return match.always if match is none

        This internal method provides a way for child objects to override the
        match operator.
        """
        return match or matchmod.always(self._repo.root, self._repo.getcwd())

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        if self.rev() is not None and self.rev() < other.rev():
            self.manifest()
        mf1 = other._manifestmatches(match, s)
        mf2 = self._manifestmatches(match, s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 != _newnode:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1]
            ignored = [fn for fn in ignored if fn not in mf1]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # Parsed .hgsub/.hgsubstate mapping for this revision.
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        """return the stored revision id of the subrepo at subpath"""
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        # Anything past the 'public' phase may still be rewritten.
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        """True if the changeset is not obsolete but it's ancestor are"""
        return self.rev() in obsmod.getrevs(self._repo, 'unstable')

    def bumped(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'bumped')

    def divergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'divergent')

    def troubled(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.unstable() or self.bumped() or self.divergent()

    def troubles(self):
        """return the list of troubles affecting this changesets.

        Troubles are returned as strings. possible values are:
        - unstable,
        - bumped,
        - divergent.
        """
        troubles = []
        if self.unstable():
            troubles.append('unstable')
        if self.bumped():
            troubles.append('bumped')
        if self.divergent():
            troubles.append('divergent')
        return troubles

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        # A single-parent changeset reports the null revision as p2.
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        """return (filenode, flags) for path, raising ManifestLookupError
        when the path is not tracked in this revision

        Prefers already-cached manifest data (full manifest, then manifest
        delta) before falling back to a targeted manifest lookup.
        """
        if '_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        node, flag = self._repo.manifest.find(self._changeset.manifest, path)
        if not node:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        # An untracked path simply has no flags.
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        """return a matcher over this context's files

        ``pats`` defaults to None instead of a shared mutable list; it is
        normalized to an empty list here, which matches the old ``pats=[]``
        default behavior.
        """
        if pats is None:
            pats = []
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def dirty(self, missing=False, merge=True, branch=True):
        # A stored revision can never be dirty; workingctx overrides this.
        return False

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        #
        # Note: 'swapped' was previously named 'reversed', which shadowed the
        # builtin of the same name.
        swapped = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            swapped = True
            ctx1, ctx2 = ctx2, ctx1

        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if swapped:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is
                    # ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
379 379
380 380
def makememctx(repo, parents, text, user, date, branch, files, store,
               editor=None, extra=None):
    """Assemble an in-memory changeset (memctx) backed by *store*.

    File contents are fetched lazily via the store's getfile(); a path
    whose data comes back as None is treated as removed. When ``branch``
    is set, it is recorded (locally encoded) in the extra dict.
    """
    if extra is None:
        extra = {}
    if branch:
        # NB: deliberately mutates a caller-supplied extra dict, matching
        # the historical contract of this helper.
        extra['branch'] = encoding.fromlocal(branch)

    def getfilectx(repo, memctx, path):
        # Resolve content, mode and copy source on demand from the store.
        data, mode, copied = store.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink, isexec=isexec,
                          copied=copied, memctx=memctx)

    return memctx(repo, parents, text, files, getfilectx, user,
                  date, extra, editor)
397 397
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag

        The body below is a lookup cascade: each recognized form of
        ``changeid`` sets self._node/self._rev and returns; falling off
        the end raises RepoLookupError. Filtered-revision errors from the
        changelog are translated into user-facing messages in the outer
        except clauses.
        """

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        try:
            # 1. an integer is taken directly as a revision number
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            # Python 2 long: normalize to a string and fall through to the
            # numeric-string handling below.
            if isinstance(changeid, long):
                changeid = str(changeid)
            # 2. the symbolic names 'null' and 'tip'
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if changeid == '.' or changeid == repo.dirstate.p1():
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            # 3. a 20-byte string is tried as a binary nodeid
            if len(changeid) == 20:
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            # 4. a string of decimal digits is tried as a (possibly
            # negative, i.e. tip-relative) revision number; str(r) must
            # round-trip so e.g. '010' is not accepted
            try:
                r = int(changeid)
                if str(r) != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            # 5. a 40-character string is tried as a full hex nodeid
            if len(changeid) == 40:
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            # 6. last resort: unique hex-prefix match against all nodes
            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if changeid in repo.unfiltered().dirstate.parents():
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                # Hex-encode a binary nodeid so the error message below is
                # printable.
                if len(changeid) == 20 and nonascii(changeid):
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            # The revision exists but is hidden/filtered in this repoview;
            # give a tailored message for the 'visible' filter.
            if repo.filtername.startswith('visible'):
                msg = _("hidden revision '%s'") % changeid
                hint = _('use --hidden to access hidden revisions')
                raise error.FilteredRepoLookupError(msg, hint=hint)
            msg = _("filtered revision '%s' (not in '%s' subset)")
            msg %= (changeid, repo.filtername)
            raise error.FilteredRepoLookupError(msg)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

    def __hash__(self):
        # Fall back to identity when _rev was never assigned.
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        # Only the null revision is falsy.
        return self._rev != nullrev

    @propertycache
    def _changeset(self):
        # Parsed changelog entry for this revision.
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        # Full manifest for this revision.
        return self._repo.manifest.read(self._changeset.manifest)

    @propertycache
    def _manifestdelta(self):
        # Manifest delta against the parent; cheaper than a full read.
        return self._repo.manifest.readdelta(self._changeset.manifest)

    @propertycache
    def _parents(self):
        # One or two parent contexts; the null second parent is omitted.
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [changectx(repo, p1)]
        return [changectx(repo, p1), changectx(repo, p2)]

    def changeset(self):
        """return the raw changeset fields as a tuple"""
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        return self._changeset.extra
    def tags(self):
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def children(self):
        """return contexts for each child changeset"""
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        """generate contexts for the ancestors of this changeset"""
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        """generate contexts for the descendants of this changeset"""
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # no configured preference matched: use the revlog's pick
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)

    def descendant(self, other):
        """True if other is descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(match, bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)
658 658
659 659 class basefilectx(object):
660 660 """A filecontext object represents the common logic for its children:
661 661 filectx: read-only access to a filerevision that is already present
662 662 in the repo,
663 663 workingfilectx: a filecontext that represents files from the working
664 664 directory,
665 665 memfilectx: a filecontext that represents files in-memory."""
666 666 def __new__(cls, repo, path, *args, **kwargs):
667 667 return super(basefilectx, cls).__new__(cls)
668 668
669 669 @propertycache
670 670 def _filelog(self):
671 671 return self._repo.file(self._path)
672 672
673 673 @propertycache
674 674 def _changeid(self):
675 675 if '_changeid' in self.__dict__:
676 676 return self._changeid
677 677 elif '_changectx' in self.__dict__:
678 678 return self._changectx.rev()
679 679 elif '_descendantrev' in self.__dict__:
680 680 # this file context was created from a revision with a known
681 681 # descendant, we can (lazily) correct for linkrev aliases
682 682 return self._adjustlinkrev(self._path, self._filelog,
683 683 self._filenode, self._descendantrev)
684 684 else:
685 685 return self._filelog.linkrev(self._filerev)
686 686
687 687 @propertycache
688 688 def _filenode(self):
689 689 if '_fileid' in self.__dict__:
690 690 return self._filelog.lookup(self._fileid)
691 691 else:
692 692 return self._changectx.filenode(self._path)
693 693
694 694 @propertycache
695 695 def _filerev(self):
696 696 return self._filelog.rev(self._filenode)
697 697
698 698 @propertycache
699 699 def _repopath(self):
700 700 return self._path
701 701
702 702 def __nonzero__(self):
703 703 try:
704 704 self._filenode
705 705 return True
706 706 except error.LookupError:
707 707 # file is missing
708 708 return False
709 709
710 710 def __str__(self):
711 711 return "%s@%s" % (self.path(), self._changectx)
712 712
713 713 def __repr__(self):
714 714 return "<%s %s>" % (type(self).__name__, str(self))
715 715
716 716 def __hash__(self):
717 717 try:
718 718 return hash((self._path, self._filenode))
719 719 except AttributeError:
720 720 return id(self)
721 721
722 722 def __eq__(self, other):
723 723 try:
724 724 return (type(self) == type(other) and self._path == other._path
725 725 and self._filenode == other._filenode)
726 726 except AttributeError:
727 727 return False
728 728
729 729 def __ne__(self, other):
730 730 return not (self == other)
731 731
732 732 def filerev(self):
733 733 return self._filerev
734 734 def filenode(self):
735 735 return self._filenode
736 736 def flags(self):
737 737 return self._changectx.flags(self._path)
738 738 def filelog(self):
739 739 return self._filelog
740 740 def rev(self):
741 741 return self._changeid
742 742 def linkrev(self):
743 743 return self._filelog.linkrev(self._filerev)
744 744 def node(self):
745 745 return self._changectx.node()
746 746 def hex(self):
747 747 return self._changectx.hex()
748 748 def user(self):
749 749 return self._changectx.user()
750 750 def date(self):
751 751 return self._changectx.date()
752 752 def files(self):
753 753 return self._changectx.files()
754 754 def description(self):
755 755 return self._changectx.description()
756 756 def branch(self):
757 757 return self._changectx.branch()
758 758 def extra(self):
759 759 return self._changectx.extra()
760 760 def phase(self):
761 761 return self._changectx.phase()
762 762 def phasestr(self):
763 763 return self._changectx.phasestr()
764 764 def manifest(self):
765 765 return self._changectx.manifest()
766 766 def changectx(self):
767 767 return self._changectx
768 768 def repo(self):
769 769 return self._repo
770 770
771 771 def path(self):
772 772 return self._path
773 773
774 774 def isbinary(self):
775 775 try:
776 776 return util.binary(self.data())
777 777 except IOError:
778 778 return False
779 779 def isexec(self):
780 780 return 'x' in self.flags()
781 781 def islink(self):
782 782 return 'l' in self.flags()
783 783
784 784 def isabsent(self):
785 785 """whether this filectx represents a file not in self._changectx
786 786
787 787 This is mainly for merge code to detect change/delete conflicts. This is
788 788 expected to be True for all subclasses of basectx."""
789 789 return False
790 790
791 791 _customcmp = False
792 792 def cmp(self, fctx):
793 793 """compare with other file context
794 794
795 795 returns True if different than fctx.
796 796 """
797 797 if fctx._customcmp:
798 798 return fctx.cmp(self)
799 799
800 800 if (fctx._filenode is None
801 801 and (self._repo._encodefilterpats
802 802 # if file data starts with '\1\n', empty metadata block is
803 803 # prepended, which adds 4 bytes to filelog.size().
804 804 or self.size() - 4 == fctx.size())
805 805 or self.size() == fctx.size()):
806 806 return self._filelog.cmp(self._filenode, fctx.data())
807 807
808 808 return True
809 809
810 810 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
811 811 """return the first ancestor of <srcrev> introducing <fnode>
812 812
813 813 If the linkrev of the file revision does not point to an ancestor of
814 814 srcrev, we'll walk down the ancestors until we find one introducing
815 815 this file revision.
816 816
817 817 :repo: a localrepository object (used to access changelog and manifest)
818 818 :path: the file path
819 819 :fnode: the nodeid of the file revision
820 820 :filelog: the filelog of this path
821 821 :srcrev: the changeset revision we search ancestors from
822 822 :inclusive: if true, the src revision will also be checked
823 823 """
824 824 repo = self._repo
825 825 cl = repo.unfiltered().changelog
826 826 ma = repo.manifest
827 827 # fetch the linkrev
828 828 fr = filelog.rev(fnode)
829 829 lkr = filelog.linkrev(fr)
830 830 # hack to reuse ancestor computation when searching for renames
831 831 memberanc = getattr(self, '_ancestrycontext', None)
832 832 iteranc = None
833 833 if srcrev is None:
834 834 # wctx case, used by workingfilectx during mergecopy
835 835 revs = [p.rev() for p in self._repo[None].parents()]
836 836 inclusive = True # we skipped the real (revless) source
837 837 else:
838 838 revs = [srcrev]
839 839 if memberanc is None:
840 840 memberanc = iteranc = cl.ancestors(revs, lkr,
841 841 inclusive=inclusive)
842 842 # check if this linkrev is an ancestor of srcrev
843 843 if lkr not in memberanc:
844 844 if iteranc is None:
845 845 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
846 846 for a in iteranc:
847 847 ac = cl.read(a) # get changeset data (we avoid object creation)
848 848 if path in ac[3]: # checking the 'files' field.
849 849 # The file has been touched, check if the content is
850 850 # similar to the one we search for.
851 851 if fnode == ma.readfast(ac[0]).get(path):
852 852 return a
853 853 # In theory, we should never get out of that loop without a result.
854 854 # But if manifest uses a buggy file revision (not children of the
855 855 # one it replaces) we could. Such a buggy situation will likely
856 856 # result is crash somewhere else at to some point.
857 857 return lkr
858 858
859 859 def introrev(self):
860 860 """return the rev of the changeset which introduced this file revision
861 861
862 862 This method is different from linkrev because it take into account the
863 863 changeset the filectx was created from. It ensures the returned
864 864 revision is one of its ancestors. This prevents bugs from
865 865 'linkrev-shadowing' when a file revision is used by multiple
866 866 changesets.
867 867 """
868 868 lkr = self.linkrev()
869 869 attrs = vars(self)
870 870 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
871 871 if noctx or self.rev() == lkr:
872 872 return self.linkrev()
873 873 return self._adjustlinkrev(self._path, self._filelog, self._filenode,
874 874 self.rev(), inclusive=True)
875 875
876 876 def _parentfilectx(self, path, fileid, filelog):
877 877 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
878 878 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
879 879 if '_changeid' in vars(self) or '_changectx' in vars(self):
880 880 # If self is associated with a changeset (probably explicitly
881 881 # fed), ensure the created filectx is associated with a
882 882 # changeset that is an ancestor of self.changectx.
883 883 # This lets us later use _adjustlinkrev to get a correct link.
884 884 fctx._descendantrev = self.rev()
885 885 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
886 886 elif '_descendantrev' in vars(self):
887 887 # Otherwise propagate _descendantrev if we have one associated.
888 888 fctx._descendantrev = self._descendantrev
889 889 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
890 890 return fctx
891 891
892 892 def parents(self):
893 893 _path = self._path
894 894 fl = self._filelog
895 895 parents = self._filelog.parents(self._filenode)
896 896 pl = [(_path, node, fl) for node in parents if node != nullid]
897 897
898 898 r = fl.renamed(self._filenode)
899 899 if r:
900 900 # - In the simple rename case, both parent are nullid, pl is empty.
901 901 # - In case of merge, only one of the parent is null id and should
902 902 # be replaced with the rename information. This parent is -always-
903 903 # the first one.
904 904 #
905 905 # As null id have always been filtered out in the previous list
906 906 # comprehension, inserting to 0 will always result in "replacing
907 907 # first nullid parent with rename information.
908 908 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
909 909
910 910 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
911 911
    def p1(self):
        # first parent filectx
        return self.parents()[0]

    def p2(self):
        # second parent filectx; when there is only one parent, return a
        # filectx for the null revision (fileid=-1, i.e. nullrev) instead
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
920 920
    def annotate(self, follow=False, linenumber=None, diffopts=None):
        '''returns a list of tuples of (ctx, line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed.
        This returns tuples of ((ctx, linenumber), line) for each line,
        if "linenumber" parameter is NOT "None".
        In such tuples, linenumber means one at the first appearance
        in the managed file.
        To reduce annotation cost,
        this returns fixed value(False is used) as linenumber,
        if "linenumber" parameter is "False".'''

        # 'decorate' pairs each line of 'text' with its origin. Despite the
        # parameter name, 'rev' receives a filectx (see 'decorate(f.data(), f)'
        # below).
        if linenumber is None:
            def decorate(text, rev):
                return ([rev] * len(text.splitlines()), text)
        elif linenumber:
            def decorate(text, rev):
                size = len(text.splitlines())
                return ([(rev, i) for i in xrange(1, size + 1)], text)
        else:
            def decorate(text, rev):
                return ([(rev, False)] * len(text.splitlines()), text)

        # merge a parent's annotation into the child's: lines unchanged ('=')
        # keep the parent's attribution, everything else stays with the child
        def pair(parent, child):
            blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
                                     refine=True)
            for (a1, a2, b1, b2), t in blocks:
                # Changed blocks ('!') or blocks made only of blank lines ('~')
                # belong to the child.
                if t == '=':
                    child[0][b1:b2] = parent[0][a1:a2]
            return child

        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self
        introrev = self.introrev()
        if self.rev() != introrev:
            base = self.filectx(self.filenode(), changeid=introrev)
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if introrev is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([introrev], inclusive=True)
            base._ancestrycontext = ac

        # This algorithm would prefer to be recursive, but Python is a
        # bit recursion-hostile. Instead we do an iterative
        # depth-first search.

        visit = [base]          # DFS stack of filectxs still to annotate
        hist = {}               # filectx -> finished (attribution, text) pair
        pcache = {}             # filectx -> its parent list (memoized)
        needed = {base: 1}      # refcount of children still needing hist[p]
        while visit:
            f = visit[-1]
            pcached = f in pcache
            if not pcached:
                pcache[f] = parents(f)

            ready = True
            pl = pcache[f]
            for p in pl:
                if p not in hist:
                    ready = False
                    visit.append(p)
                # only count references the first time f's parents are seen,
                # otherwise revisits would inflate the refcounts
                if not pcached:
                    needed[p] = needed.get(p, 0) + 1
            if ready:
                visit.pop()
                reusable = f in hist
                if reusable:
                    curr = hist[f]
                else:
                    curr = decorate(f.data(), f)
                for p in pl:
                    if not reusable:
                        curr = pair(hist[p], curr)
                    # drop hist entries as soon as no child needs them,
                    # keeping peak memory bounded
                    if needed[p] == 1:
                        del hist[p]
                        del needed[p]
                    else:
                        needed[p] -= 1

                hist[f] = curr
                # clear the parent list so a revisit of f takes the
                # 'reusable' path and does not merge parents a second time
                pcache[f] = []

        return zip(hist[base][0], hist[base][1].splitlines(True))
1034 1034
1035 1035 def ancestors(self, followfirst=False):
1036 1036 visit = {}
1037 1037 c = self
1038 1038 if followfirst:
1039 1039 cut = 1
1040 1040 else:
1041 1041 cut = None
1042 1042
1043 1043 while True:
1044 1044 for parent in c.parents()[:cut]:
1045 1045 visit[(parent.linkrev(), parent.filenode())] = parent
1046 1046 if not visit:
1047 1047 break
1048 1048 c = visit.pop(max(visit))
1049 1049 yield c
1050 1050
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of locating the file revision must be supplied
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        # only pre-seed the lazy (propertycache) attributes that were
        # actually provided; the rest are computed on demand
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def data(self):
        """return the file revision's content

        Censored nodes yield an empty string when censor.policy is
        'ignore', and abort otherwise.
        """
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy", "abort") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        # size of this file revision as recorded by the filelog
        return self._filelog.size(self._filerev)

    def renamed(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        # if either parent already carries this exact file revision, the
        # rename happened earlier, so don't report it here
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1148 1148
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        # only pre-seed the lazy (propertycache) attributes that were
        # actually supplied; the rest are computed on first access
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
            if self._extra['branch'] == '':
                self._extra['branch'] = 'default'

    def __str__(self):
        # uncommitted contexts render as "<p1 short hash>+"
        return str(self._parents[0]) + "+"

    def __nonzero__(self):
        return True

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we append an extra letter
        when modified. Modified files get an extra 'm' while added files get
        an extra 'a'. This is used by manifests merge to see that files
        are different and by update logic to avoid deleting newly added files.
        """
        parents = self.parents()

        man1 = parents[0].manifest()
        man = man1.copy()
        if len(parents) > 1:
            man2 = self.p2().manifest()
            def getman(f):
                if f in man1:
                    return man1
                return man2
        else:
            getman = lambda f: man1

        copied = self._repo.dirstate.copies()
        ff = self._flagfunc
        for i, l in (("a", self._status.added), ("m", self._status.modified)):
            for f in l:
                orig = copied.get(f, f)
                man[f] = getman(orig).get(orig, nullid) + i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in self._status.deleted + self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        # default to the full repository status when no explicit 'changes'
        # were passed to __init__
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        return util.makedate()

    def subrev(self, subpath):
        # an uncommitted context has no recorded subrepo revision yet
        return None

    def manifestnode(self):
        # no manifest node exists until this context is committed
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        # all touched files (modified + added + removed), sorted
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        # an uncommitted context can carry no tags
        return []

    def bookmarks(self):
        # union of the parents' bookmarks
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        """return the flags ('l', 'x' or '') of path, '' if unknown"""
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                               True, False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        # yield the parents first, then every changelog ancestor
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        self._repo.dirstate.beginparentchange()
        for f in self.modified() + self.added():
            self._repo.dirstate.normal(f)
        for f in self.removed():
            self._repo.dirstate.drop(f)
        self._repo.dirstate.setparents(node)
        self._repo.dirstate.endparentchange()

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())
1374 1374
class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        # yield tracked files, skipping those marked as removed ('r')
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        # a file is part of the working context unless it is untracked ('?')
        # or marked as removed ('r')
        return self._repo.dirstate[key] not in "?r"

    def hex(self):
        return hex(wdirid)

    @propertycache
    def _parents(self):
        # drop a null second parent so non-merge states expose one parent
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty():
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

    def add(self, list, prefix=""):
        """schedule the given files for addition; return rejected paths"""
        join = lambda f: os.path.join(prefix, f)
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                scmutil.checkportable(ui, join(f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % join(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, join(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % join(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % join(f))
                elif ds[f] == 'r':
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected

    def forget(self, files, prefix=""):
        """stop tracking the given files; return rejected paths"""
        join = lambda f: os.path.join(prefix, f)
        with self._repo.wlock():
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % join(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    self._repo.dirstate.remove(f)
                else:
                    self._repo.dirstate.drop(f)
            return rejected

    def undelete(self, list):
        """restore files marked as removed, using data from a parent"""
        pctxs = self.parents()
        with self._repo.wlock():
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % f)
                else:
                    # take the data from the first parent that has the file
                    # (the old "cond and a or b" idiom would have picked the
                    # wrong parent if the first filectx evaluated falsy)
                    if f in pctxs[0]:
                        fctx = pctxs[0][f]
                    else:
                        fctx = pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)

    def copy(self, source, dest):
        """record in the dirstate that dest is a copy of source"""
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n") % dest)
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n") % dest)
        else:
            with self._repo.wlock():
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        """return a matcher for the given patterns against this context"""
        # 'pats=None' avoids a shared mutable default argument; None is
        # equivalent to the previous default of an empty pattern list
        if pats is None:
            pats = []
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        if not util.checkcase(r.root):
            return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
                                           exclude, default, r.auditor, self,
                                           listsubrepos=listsubrepos,
                                           badfn=badfn)
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def _filtersuspectsymlink(self, files):
        """drop files whose symlink placeholder content looks bogus"""
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        """split 'files' into (modified, fixup) by full comparison with p1"""
        # check for any possibly clean files
        if not files:
            return [], []

        modified = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            if (f not in pctx or self.flags(f) != pctx.flags(f)
                or pctx[f].cmp(self[f])):
                modified.append(f)
            else:
                fixup.append(f)

        # update dirstate for files that are actually clean
        if fixup:
            try:
                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    normal = self._repo.dirstate.normal
                    for f in fixup:
                        normal(f)
                    # write changes out explicitly, because nesting
                    # wlock at runtime may prevent 'wlock.release()'
                    # after this block from doing so for subsequent
                    # changing files
                    self._repo.dirstate.write(self._repo.currenttransaction())
            except error.LockError:
                pass
        return modified, fixup

    def _manifestmatches(self, match, s):
        """Slow path for workingctx

        The fast path is when we compare the working directory to its parent
        which means this function is comparing with a non-parent; therefore we
        need to build a manifest and return what matches.
        """
        mf = self._repo['.']._manifestmatches(match, s)
        for f in s.modified + s.added:
            mf[f] = _newnode
            mf.setflag(f, self.flags(f))
        for f in s.removed:
            if f in mf:
                del mf[f]
        return mf

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        listignored, listclean, listunknown = ignored, clean, unknown
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
                                            listclean, listunknown)

        # check for any possibly clean files
        if cmp:
            modified2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)

            # update dirstate for files that are actually clean
            if fixup and listclean:
                s.clean.extend(fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        superself = super(workingctx, self)
        match = superself._matchstatus(other, match)
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match
1660 1660
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            # node of 'path' in ctx's manifest, or nullid when absent
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # rename source as first parent: (srcpath, srcnode, None);
            # the None filelog makes the parent filectx resolve the
            # source's filelog lazily
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # drop parents where the file is absent (nullid node)
        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        # children are unknown until this context is committed
        return []
1705 1705
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # lazily bind to a fresh working directory context
        return workingctx(self._repo)

    def data(self):
        # file content as read from the working directory
        return self._repo.wread(self._path)
    def renamed(self):
        # report (source, source node in p1's manifest) when the dirstate
        # records a copy for this path, else None
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        # working-copy mtime when the file exists; otherwise fall back to
        # the changectx date (the timezone always comes from the changectx)
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags)
1751 1751
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # deliberately skip workingctx.__init__ (which only forwards its
        # arguments) and call committablectx.__init__ directly, passing the
        # mandatory 'changes'
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        if clean:
            # everything in the manifest that is not being committed
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
1789 1789
def makecachingfilectxfn(func):
    """Wrap ``func`` in a filectxfn that memoizes results per path.

    util.cachefunc cannot be used here because it keys the cache on
    every argument, which would create a reference cycle through the
    repo and memctx arguments.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        try:
            return cache[path]
        except KeyError:
            fctx = func(repo, memctx, path)
            cache[path] = fctx
            return fctx

    return getfilectx
1805 1805
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn raises an
    IOError. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # normalize missing parents to the null revision
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        # if store is not callable, wrap it in a function
        if not callable(filectxfn):
            def getfilectx(repo, memctx, path):
                fctx = filectxfn[path]
                # this is weird but apparently we only keep track of one parent
                # (why not only store that instead of a tuple?)
                copied = fctx.renamed()
                if copied:
                    copied = copied[0]
                return memfilectx(repo, path, fctx.data(),
                                  islink=fctx.islink(), isexec=fctx.isexec(),
                                  copied=copied, memctx=memctx)
            self._filectxfn = getfilectx
        else:
            # memoizing increases performance for e.g. vcs convert scenarios.
            self._filectxfn = makecachingfilectxfn(filectxfn)

        if extra:
            self._extra = extra.copy()
        else:
            self._extra = {}

        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            # added files have no file-level parents
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                # filectxfn produced a (truthy) file context -> modified
                modified.append(f)
            else:
                # filectxfn returned a falsy value -> treated as removed
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
1942 1942
class memfilectx(committablefilectx):
    """An in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        flags = ''
        if islink:
            flags += 'l'
        if isexec:
            flags += 'x'
        self._flags = flags
        # only one rename source is tracked; nullid stands in for the
        # source file node
        self._copied = (copied, nullid) if copied else None

    def data(self):
        return self._data

    def size(self):
        return len(self.data())

    def flags(self):
        return self._flags

    def renamed(self):
        return self._copied

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data
@@ -1,1008 +1,1012
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import os
13 13 import shutil
14 14
15 15 from .i18n import _
16 16 from .node import nullid
17 17
18 18 from . import (
19 19 bookmarks,
20 20 bundlerepo,
21 21 cmdutil,
22 22 destutil,
23 23 discovery,
24 24 error,
25 25 exchange,
26 26 extensions,
27 27 httppeer,
28 28 localrepo,
29 29 lock,
30 30 merge as mergemod,
31 31 node,
32 32 phases,
33 33 repoview,
34 34 scmutil,
35 35 sshpeer,
36 36 statichttprepo,
37 37 ui as uimod,
38 38 unionrepo,
39 39 url,
40 40 util,
41 41 verify as verifymod,
42 42 )
43 43
44 44 release = lock.release
45 45
def _local(path):
    """Return the repo module for a local path.

    A plain file is assumed to be a bundle; anything else is treated
    as a local repository directory.
    """
    expanded = util.expandpath(util.urllocalpath(path))
    return bundlerepo if os.path.isfile(expanded) else localrepo
49 49
def addbranchrevs(lrepo, other, branches, revs):
    # Resolve branch names from ``branches`` (the (hashbranch, branches)
    # pair produced by parseurl) against ``other`` and merge them into
    # ``revs``.  Returns (revs, checkout) where checkout is the revision
    # the caller should update to, or None.
    peer = other.peer() # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # nothing to resolve: pass through revs and pick a checkout
        x = revs or None
        if util.safehasattr(revs, 'first'):
            y = revs.first()
        elif revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable('branchmap'):
        # old servers: only the hash/branch name itself can be used
        if branches:
            raise error.Abort(_("remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]
    branchmap = peer.branchmap()

    def primary(branch):
        # expand ``branch`` into revs; returns True when it was found
        if branch == '.':
            if not lrepo:
                raise error.Abort(_("dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
    if hashbranch:
        # hashbranch may be a changeset hash rather than a branch name;
        # keep it as-is when the branchmap doesn't know it
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
92 92
def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''
    u = util.url(path)
    if u.fragment:
        # the fragment names a branch; strip it from the url
        branch, u.fragment = u.fragment, None
    else:
        branch = None
    return str(u), (branch, branches or [])
102 102
# Map of URL scheme -> module (or callable) that provides repo/peer
# instances for that scheme; see _peerlookup for the dispatch logic.
schemes = {
    'bundle': bundlerepo,
    'union': unionrepo,
    'file': _local,
    'http': httppeer,
    'https': httppeer,
    'ssh': sshpeer,
    'static-http': statichttprepo,
}
112 112
def _peerlookup(path):
    """Return the scheme handler (module or factory result) for ``path``."""
    scheme = util.url(path).scheme or 'file'
    handler = schemes.get(scheme) or schemes['file']
    try:
        return handler(path)
    except TypeError:
        # we can't test callable(handler) because it can be an unloaded
        # module that implements __call__
        if util.safehasattr(handler, 'instance'):
            return handler
        raise
125 125
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, str):
        return repo.local()
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        # handler has no notion of locality
        return False
134 134
def openpath(ui, path):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        return url.open(ui, path)
    return util.posixfile(pathurl.localpath(), 'rb')
142 142
# a list of (ui, repo) functions called for wire peer initialization;
# each entry is invoked by _peerorrepo on newly created non-local peers
wirepeersetupfuncs = []
145 145
def _peerorrepo(ui, path, create=False):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(ui, path, create)
    # prefer the object's own ui when it carries one
    ui = getattr(obj, "ui", ui)
    for _name, module in extensions.extensions(ui):
        hook = getattr(module, 'reposetup', None)
        if hook:
            hook(ui, obj)
    if not obj.local():
        # give wire-peer setup hooks a chance to run
        for setupfunc in wirepeersetupfuncs:
            setupfunc(ui, obj)
    return obj
158 158
def repository(ui, path='', create=False):
    """Return the 'visible'-filtered local repository at ``path``.

    Aborts when ``path`` refers to a remote (non-local) repository.
    """
    obj = _peerorrepo(ui, path, create)
    repo = obj.local()
    if not repo:
        raise error.Abort(_("repository '%s' is not local") %
                          (path or obj.url()))
    return repo.filtered('visible')
167 167
def peer(uiorrepo, opts, path, create=False):
    '''return a repository peer for the specified path'''
    remotecfg = remoteui(uiorrepo, opts)
    return _peerorrepo(remotecfg, path, create).peer()
172 172
def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest('foo')
    'foo'
    >>> defaultdest('/foo/bar')
    'bar'
    >>> defaultdest('/')
    ''
    >>> defaultdest('')
    ''
    >>> defaultdest('http://example.org/')
    ''
    >>> defaultdest('http://example.org/foo/')
    'foo'
    '''
    path = util.url(source).path
    # last meaningful path component, or '' when there is none
    return os.path.basename(os.path.normpath(path)) if path else ''
193 193
def share(ui, source, dest=None, update=True, bookmarks=True):
    '''create a shared repository'''

    if not islocal(source):
        raise error.Abort(_('can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, str):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        origsource = source = srcrepo.url()
        checkout = None

    sharedpath = srcrepo.sharedpath # if our source is already sharing

    destwvfs = scmutil.vfs(dest, realpath=True)
    destvfs = scmutil.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)

    if destvfs.lexists():
        raise error.Abort(_('destination already exists'))

    if not destwvfs.isdir():
        destwvfs.mkdir()
    destvfs.makedir()

    # inherit the source's requirements; a missing 'requires' file is
    # tolerated (very old repo formats)
    requirements = ''
    try:
        requirements = srcrepo.vfs.read('requires')
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise

    # mark the new repo as shared and point it at the source's store
    requirements += 'shared\n'
    destvfs.write('requires', requirements)
    destvfs.write('sharedpath', sharedpath)

    r = repository(ui, destwvfs.base)
    postshare(srcrepo, r, bookmarks=bookmarks)
    _postshareupdate(r, update, checkout=checkout)
241 241
def postshare(sourcerepo, destrepo, bookmarks=True):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = sourcerepo.ui.config('paths', 'default')
    if default:
        # seed the new repo's hgrc with the source's default path;
        # close via try/finally so the handle is not leaked if a
        # write fails
        fp = destrepo.vfs("hgrc", "w", text=True)
        try:
            fp.write("[paths]\n")
            fp.write("default = %s\n" % default)
        finally:
            fp.close()

    if bookmarks:
        fp = destrepo.vfs('shared', 'w')
        try:
            fp.write('bookmarks\n')
        finally:
            fp.close()
262 262
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_("updating working directory\n"))
    if update is not True:
        checkout = update
    # try the requested revision first, then sensible fallbacks
    for candidate in (checkout, 'default', 'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)
283 283
def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock
    '''
    destlock = None
    try:
        hardlink = None
        num = 0
        # closure state: remembers the last progress topic so it can be
        # closed once copying finishes
        closetopic = [None]
        def prog(topic, pos):
            if pos is None:
                closetopic[0] = topic
            else:
                ui.progress(topic, pos + num)
        srcpublishing = srcrepo.publishing()
        srcvfs = scmutil.vfs(srcrepo.sharedpath)
        dstvfs = scmutil.vfs(destpath)
        for f in srcrepo.store.copylist():
            # publishing repos don't need phase data copied over
            if srcpublishing and f.endswith('phaseroots'):
                continue
            dstbase = os.path.dirname(f)
            if dstbase and not dstvfs.exists(dstbase):
                dstvfs.mkdir(dstbase)
            if srcvfs.exists(f):
                if f.endswith('data'):
                    # 'dstbase' may be empty (e.g. revlog format 0)
                    lockfile = os.path.join(dstbase, "lock")
                    # lock to avoid premature writing to the target
                    destlock = lock.lock(dstvfs, lockfile)
                hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                             hardlink, progress=prog)
                num += n
        if hardlink:
            ui.debug("linked %d files\n" % num)
            if closetopic[0]:
                ui.progress(closetopic[0], None)
        else:
            ui.debug("copied %d files\n" % num)
            if closetopic[0]:
                ui.progress(closetopic[0], None)
        return destlock
    except: # re-raises
        release(destlock)
        raise
329 329
def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
                   rev=None, update=True, stream=False):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable('lookup'):
            raise error.Abort(_("src repository does not support "
                                "revision lookup and so doesn't "
                                "support clone by revision"))
        revs = [srcpeer.lookup(r) for r in rev]

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = scmutil.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, '%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(_('(sharing from existing pooled repository %s)\n') %
                      basename)
        else:
            ui.status(_('(sharing from new pooled repository %s)\n') % basename)
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(ui, peeropts, source, dest=sharepath, pull=True,
                  rev=rev, update=False, stream=stream)

    sharerepo = repository(ui, path=sharepath)
    share(ui, sharerepo, dest=dest, update=False, bookmarks=False)

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    destrepo = repository(ui, path=dest)
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
386 386
def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
          update=True, stream=False, branch=None, shareopts=None):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    rev: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."
    """

    if isinstance(source, str):
        origsource = ui.expandpath(source)
        source, branch = parseurl(origsource, branch)
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer() # in case we were called with a localrepo
        branch = (None, branch or [])
        origsource = source = srcpeer.url()
    rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)

    if dest is None:
        dest = defaultdest(source)
        if dest:
            ui.status(_("destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = util.urllocalpath(dest)
    source = util.urllocalpath(source)

    if not dest:
        raise error.Abort(_("empty destination path is not valid"))

    destvfs = scmutil.vfs(dest, expandpath=True)
    if destvfs.lexists():
        if not destvfs.isdir():
            raise error.Abort(_("destination '%s' already exists") % dest)
        elif destvfs.listdir():
            raise error.Abort(_("destination '%s' is not empty") % dest)

    # pooled-storage mode: delegate to clonewithshare when a share path
    # can be determined
    shareopts = shareopts or {}
    sharepool = shareopts.get('pool')
    sharenamemode = shareopts.get('mode')
    if sharepool and islocal(dest):
        sharepath = None
        if sharenamemode == 'identity':
            # Resolve the name from the initial changeset in the remote
            # repository. This returns nullid when the remote is empty. It
            # raises RepoLookupError if revision 0 is filtered or otherwise
            # not available. If we fail to resolve, sharing is not enabled.
            try:
                rootnode = srcpeer.lookup('0')
                if rootnode != node.nullid:
                    sharepath = os.path.join(sharepool, node.hex(rootnode))
                else:
                    ui.status(_('(not using pooled storage: '
                                'remote appears to be empty)\n'))
            except error.RepoLookupError:
                ui.status(_('(not using pooled storage: '
                            'unable to resolve identity of remote)\n'))
        elif sharenamemode == 'remote':
            sharepath = os.path.join(sharepool, util.sha1(source).hexdigest())
        else:
            raise error.Abort('unknown share naming mode: %s' % sharenamemode)

        if sharepath:
            return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
                                  dest, pull=pull, rev=rev, update=update,
                                  stream=stream)

    srclock = destlock = cleandir = None
    srcrepo = srcpeer.local()
    try:
        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(util.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        # decide whether a raw store copy (hardlink clone) is possible
        copy = False
        if (srcrepo and srcrepo.cancopy() and islocal(dest)
            and not phases.hassecret(srcrepo)):
            copy = not pull and not rev

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            # --- hardlink/copy clone path ---
            srcrepo.hook('preoutgoing', throw=True, source='clone')
            hgdir = os.path.realpath(os.path.join(dest, ".hg"))
            if not os.path.exists(dest):
                os.mkdir(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.join('bookmarks')
            dstbookmarks = os.path.join(destpath, 'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            # Recomputing branch cache might be slow on big repos,
            # so just copy it
            def copybranchcache(fname):
                srcbranchcache = srcrepo.join('cache/%s' % fname)
                dstbranchcache = os.path.join(dstcachedir, fname)
                if os.path.exists(srcbranchcache):
                    if not os.path.exists(dstcachedir):
                        os.mkdir(dstcachedir)
                    util.copyfile(srcbranchcache, dstbranchcache)

            dstcachedir = os.path.join(destpath, 'cache')
            # In local clones we're copying all nodes, not just served
            # ones. Therefore copy all branch caches over.
            copybranchcache('branch2')
            for cachename in repoview.filtertable:
                copybranchcache('branch2-%s' % cachename)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook('outgoing', source='clone',
                         node=node.hex(node.nullid))
        else:
            # --- pull/push clone path ---
            try:
                destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
                                # only pass ui when no srcrepo
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            revs = None
            if rev:
                if not srcpeer.capable('lookup'):
                    raise error.Abort(_("src repository does not support "
                                        "revision lookup and so doesn't "
                                        "support clone by revision"))
                revs = [srcpeer.lookup(r) for r in rev]
                checkout = revs[0]
            local = destpeer.local()
            if local:
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                quiet = local.ui.backupconfig('ui', 'quietbookmarkmove')
                try:
                    local.ui.setconfig(
                        'ui', 'quietbookmarkmove', True, 'clone')
                    exchange.pull(local, srcpeer, revs,
                                  streamclonerequested=stream)
                finally:
                    local.ui.restoreconfig(quiet)
            elif srcrepo:
                exchange.push(srcrepo, destpeer, revs=revs,
                              bookmarks=srcrepo._bookmarks.keys())
            else:
                raise error.Abort(_("clone from remote to remote not supported")
                                  )

        # clone succeeded; don't remove the destination on the way out
        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            # record the source as the default path of the new repo
            template = uimod.samplehgrcs['cloned']
            fp = destrepo.vfs("hgrc", "w", text=True)
            u = util.url(abspath)
            u.passwd = None
            defaulturl = str(u)
            fp.write(template % defaulturl)
            fp.close()

            destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')

            if update:
                if update is not True:
                    checkout = srcpeer.lookup(update)
                uprev = None
                status = None
                if checkout is not None:
                    try:
                        uprev = destrepo.lookup(checkout)
                    except error.RepoLookupError:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    # fall back to the '@' bookmark, then the default
                    # branch tip, then tip
                    try:
                        uprev = destrepo._bookmarks['@']
                        update = '@'
                        bn = destrepo[uprev].branch()
                        if bn == 'default':
                            status = _("updating to bookmark @\n")
                        else:
                            status = (_("updating to bookmark @ on branch %s\n")
                                      % bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip('default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup('tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _("updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
    return srcpeer, destpeer
663 663
def _showstats(repo, stats, quietempty=False):
    """Report the update/merge statistics tuple via repo.ui."""
    if quietempty and not any(stats):
        # nothing happened and the caller asked for silence
        return
    repo.ui.status(_("%d files updated, %d files merged, "
                     "%d files removed, %d files unresolved\n") % stats)
669 669
def updaterepo(repo, node, overwrite):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    return mergemod.update(repo, node, False, overwrite,
                           labels=['working copy', 'destination'])
678 678
def update(repo, node, quietempty=False):
    """update the working directory to node, merging linear changes"""
    stats = updaterepo(repo, node, False)
    _showstats(repo, stats, quietempty)
    unresolved = stats[3]
    if unresolved:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved > 0
686 686
# naming conflict in clone()
# (clone() has a local variable named ``update``, so it calls _update)
_update = update
689 689
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    mergestats = updaterepo(repo, node, True)
    # a forced update invalidates any interrupted graft state
    util.unlinkpath(repo.join('graftstate'), ignoremissing=True)
    if show_stats:
        _showstats(repo, mergestats, quietempty)
    return mergestats[3] > 0
697 697
# naming conflict in updatetotally()
# (updatetotally() has a ``clean`` parameter, so it calls _clean)
_clean = clean
700 700
def updatetotally(ui, repo, checkout, brev, clean=False, check=False):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :check: whether changes in the working directory should be checked

    This returns whether conflict is detected at updating or not.
    """
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one, and possibly
            # a bookmark to move along with the update
            updata = destutil.destupdate(repo, clean=clean, check=check)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            ret = _update(repo, checkout)

        # bookmark handling happens only after a successful update (ret is
        # falsy), since conflicts leave the working directory in flux
        if not ret and movemarkfrom:
            if movemarkfrom == repo['.'].node():
                pass # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
                ui.status(_("updating bookmark %s\n") % repo._activebookmark)
            else:
                # this can happen with a non-linear update
                ui.status(_("(leaving bookmark %s)\n") %
                         repo._activebookmark)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # destination was named by a bookmark: make it the active one
            if brev != repo._activebookmark:
                ui.status(_("(activating bookmark %s)\n") % brev)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination was a plain rev/name: drop any active bookmark
            if repo._activebookmark:
                ui.status(_("(leaving bookmark %s)\n") %
                          repo._activebookmark)
            bookmarks.deactivate(repo)

        if warndest:
            # mention other interesting destinations (e.g. other heads)
            destutil.statusotherdests(ui, repo)

    return ret
754 754
def merge(repo, node, force=None, remind=True, mergeforce=False):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce)
    _showstats(repo, stats)
    unresolved = stats[3] > 0
    if unresolved:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg update -C .' to abandon\n"))
    elif remind:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return unresolved
766 766
def _incoming(displaychlist, subreporecurse, ui, repo, source,
              opts, buffered=False):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
        (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
    other = peer(repo, opts, source)
    ui.status(_('comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    # getremotechanges may replace 'other' with a local bundle repository
    # holding the incoming changesets; cleanupfn disposes of it
    other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                revs, opts["bundle"], opts["force"])
    try:
        if not chlist:
            ui.status(_("no changes found\n"))
            return subreporecurse()

        displayer = cmdutil.show_changeset(ui, other, opts, buffered)
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        # always remove the temporary bundle repo, even on error
        cleanupfn()
    subreporecurse()
    return 0 # exit code is zero since we found incoming changes
796 796
def incoming(ui, repo, source, opts):
    """Show new changesets found in ``source``.

    Returns 0 if incoming changes were found, 1 otherwise.
    """
    def subreporecurse():
        # with --subrepos, recurse and keep the best (lowest) exit code
        code = 1
        if opts.get('subrepos'):
            wctx = repo[None]
            for subpath in sorted(wctx.substate):
                code = min(code, wctx.sub(subpath).incoming(ui, source, opts))
        return code

    def display(other, chlist, displayer):
        limit = cmdutil.loglimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        shown = 0
        for n in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[n])
    return _incoming(display, subreporecurse, ui, repo, source, opts)
821 821
def _outgoing(ui, repo, dest, opts):
    """Resolve ``dest`` and compute the changesets missing from it.

    Returns a pair (missing, other): the list of outgoing nodes and the
    peer object for the destination.
    """
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = parseurl(dest, opts.get('branch'))
    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(repo.unfiltered(), other, revs,
                                            force=opts.get('force'))
    missing = outgoing.missing
    if not missing:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return missing, other
837 837
def outgoing(ui, repo, dest, opts):
    """Show changesets not present in the destination.

    Returns 0 if outgoing changes were found, 1 otherwise.
    """
    def recurse():
        # with --subrepos, recurse and keep the best (lowest) exit code
        code = 1
        if opts.get('subrepos'):
            wctx = repo[None]
            for subpath in sorted(wctx.substate):
                code = min(code, wctx.sub(subpath).outgoing(ui, dest, opts))
        return code

    limit = cmdutil.loglimit(opts)
    missing, other = _outgoing(ui, repo, dest, opts)
    if not missing:
        cmdutil.outgoinghooks(ui, repo, other, opts, missing)
        return recurse()

    if opts.get('newest_first'):
        missing.reverse()
    displayer = cmdutil.show_changeset(ui, repo, opts)
    shown = 0
    for n in missing:
        if limit is not None and shown >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts.get('no_merges') and len(parents) == 2:
            continue
        shown += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, missing)
    recurse()
    return 0 # exit code is zero since we found outgoing changes
870 870
def verify(repo):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs("filelog(%s)",
                     util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))

    if revs:
        repo.ui.status(_('checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # allowcreate=False so that verifying a reference to
                        # a missing subrepo warns instead of silently
                        # initializing an empty one on disk (issue5128)
                        ret = (ctx.sub(subpath, allowcreate=False).verify()
                               or ret)
                    except error.RepoError as e:
                        repo.ui.warn(_('%s: %s\n') % (rev, e))
            except Exception:
                # an unreadable .hgsubstate means we cannot even enumerate
                # the subrepos of this revision
                repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
                             node.short(ctx.node()))

    return ret
895 899
def remoteui(src, opts):
    'build a remote ui from ui or repo and opts'
    # a repository carries a baseui without repo-specific configuration;
    # a plain ui object is copied wholesale
    if util.safehasattr(src, 'baseui'):
        dst = src.baseui.copy()
        src = src.ui
    else:
        dst = src.copy()

    # ssh-specific options: command-line flags win over source config
    for key in ('ssh', 'remotecmd'):
        value = opts.get(key) or src.config('ui', key)
        if value:
            dst.setconfig("ui", key, value, 'copied')

    # remember the main repo root for bundle repos
    mainroot = src.config('bundle', 'mainreporoot')
    if mainroot:
        dst.setconfig('bundle', 'mainreporoot', mainroot, 'copied')

    # selected local settings that must follow to the remote ui
    for section in ('auth', 'hostfingerprints', 'http_proxy'):
        for name, value in src.configitems(section):
            dst.setconfig(section, name, value, 'copied')
    cacerts = src.config('web', 'cacerts')
    if cacerts == '!':
        # '!' explicitly disables certificate verification; keep as-is
        dst.setconfig('web', 'cacerts', cacerts, 'copied')
    elif cacerts:
        dst.setconfig('web', 'cacerts', util.expandpath(cacerts), 'copied')

    return dst
926 930
927 931 # Files of interest
928 932 # Used to check if the repository has changed looking at mtime and size of
929 933 # these files.
930 934 foi = [('spath', '00changelog.i'),
931 935 ('spath', 'phaseroots'), # ! phase can change content at the same size
932 936 ('spath', 'obsstore'),
933 937 ('path', 'bookmarks'), # ! bookmark can change content at the same size
934 938 ]
935 939
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # fingerprint (mtime/size of files of interest) and newest mtime
        self._state, self.mtime = self._repostate()
        # remember the view filter so refreshed repos keep the same view
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            # fingerprint unchanged: reuse the cached instance
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Compute the (fingerprint, max mtime) pair for this repo.

        The fingerprint is a tuple of (mtime, size) pairs, one per entry
        in the module-level ``foi`` list.
        """
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # file may not exist (e.g. no obsstore); fall back to the
                # containing directory so the entry still contributes
                st = os.stat(prefix)
            state.append((st.st_mtime, st.st_size))
            maxmtime = max(maxmtime, st.st_mtime)

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # share the fingerprint so the copy does not immediately refresh
        c._state = self._state
        c.mtime = self.mtime
        return c
@@ -1,1947 +1,1947
1 1 # subrepo.py - sub-repository handling for Mercurial
2 2 #
3 3 # Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import copy
11 11 import errno
12 12 import os
13 13 import posixpath
14 14 import re
15 15 import stat
16 16 import subprocess
17 17 import sys
18 18 import tarfile
19 19 import xml.dom.minidom
20 20
21 21
22 22 from .i18n import _
23 23 from . import (
24 24 cmdutil,
25 25 config,
26 26 error,
27 27 exchange,
28 28 match as matchmod,
29 29 node,
30 30 pathutil,
31 31 phases,
32 32 scmutil,
33 33 util,
34 34 )
35 35
36 36 hg = None
37 37 propertycache = util.propertycache
38 38
39 39 nullstate = ('', '', 'empty')
40 40
def _expandedabspath(path):
    '''
    get a path or url and if it is a path expand it and return an absolute path
    '''
    expanded = util.urllocalpath(util.expandpath(path))
    url = util.url(expanded)
    if url.scheme:
        # a real URL (has a scheme): hand it back untouched
        return path
    return util.normpath(os.path.abspath(url.path))
50 50
def _getstorehashcachename(remotepath):
    '''get a unique filename for the store hash cache of a remote repository'''
    # hash the normalized path and keep a short, filesystem-safe prefix
    digest = util.sha1(_expandedabspath(remotepath)).hexdigest()
    return digest[:12]
54 54
class SubrepoAbort(error.Abort):
    """Exception class used to avoid handling a subrepo error more than once"""
    def __init__(self, *args, **kw):
        error.Abort.__init__(self, *args, **kw)
        # path of the subrepo where the error originated, if provided
        self.subrepo = kw.get('subrepo')
        # original (exc_type, exc_value, traceback) triple, if provided
        self.cause = kw.get('cause')
61 61
def annotatesubrepoerror(func):
    """Decorator rewrapping error.Abort raised inside a subrepo method.

    The abort message is annotated with the subrepo path, and the new
    exception type (SubrepoAbort) marks the error as already handled so
    outer layers don't annotate it a second time.
    """
    def decoratedmethod(self, *args, **kargs):
        try:
            res = func(self, *args, **kargs)
        except SubrepoAbort as ex:
            # This exception has already been handled
            raise ex
        except error.Abort as ex:
            subrepo = subrelpath(self)
            errormsg = str(ex) + ' ' + _('(in subrepo %s)') % subrepo
            # avoid handling this exception by raising a SubrepoAbort exception
            raise SubrepoAbort(errormsg, hint=ex.hint, subrepo=subrepo,
                               cause=sys.exc_info())
        return res
    return decoratedmethod
77 77
def state(ctx, ui):
    """return a state dict, mapping subrepo paths configured in .hgsub
    to tuple: (source from .hgsub, revision from .hgsubstate, kind
    (key in types dict))
    """
    p = config.config()
    repo = ctx.repo()
    def read(f, sections=None, remap=None):
        # parse an in-repo config file; 'read' is passed back to p.parse
        # so %include directives recurse through this same function
        if f in ctx:
            try:
                data = ctx[f].data()
            except IOError as err:
                if err.errno != errno.ENOENT:
                    raise
                # handle missing subrepo spec files as removed
                ui.warn(_("warning: subrepo spec file \'%s\' not found\n") %
                        repo.pathto(f))
                return
            p.parse(f, data, sections, remap, read)
        else:
            raise error.Abort(_("subrepo spec file \'%s\' not found") %
                              repo.pathto(f))
    if '.hgsub' in ctx:
        read('.hgsub')

    # user-configured [subpaths] remappings layer on top of .hgsub's own
    for path, src in ui.configitems('subpaths'):
        p.set('subpaths', path, src, ui.configsource('subpaths', path))

    # collect pinned revisions per subrepo path from .hgsubstate
    rev = {}
    if '.hgsubstate' in ctx:
        try:
            for i, l in enumerate(ctx['.hgsubstate'].data().splitlines()):
                l = l.lstrip()
                if not l:
                    continue
                try:
                    revision, path = l.split(" ", 1)
                except ValueError:
                    raise error.Abort(_("invalid subrepository revision "
                                        "specifier in \'%s\' line %d")
                                      % (repo.pathto('.hgsubstate'), (i + 1)))
                rev[path] = revision
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise

    def remap(src):
        # apply every [subpaths] pattern, in order, to the source string
        for pattern, repl in p.items('subpaths'):
            # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub
            # does a string decode.
            repl = repl.encode('string-escape')
            # However, we still want to allow back references to go
            # through unharmed, so we turn r'\\1' into r'\1'. Again,
            # extra escapes are needed because re.sub string decodes.
            repl = re.sub(r'\\\\([0-9]+)', r'\\\1', repl)
            try:
                src = re.sub(pattern, repl, src, 1)
            except re.error as e:
                raise error.Abort(_("bad subrepository pattern in %s: %s")
                                  % (p.source('subpaths', pattern), e))
        return src

    state = {}
    for path, src in p[''].items():
        # sources may be prefixed with '[kind]' (e.g. [git]); default is hg
        kind = 'hg'
        if src.startswith('['):
            if ']' not in src:
                raise error.Abort(_('missing ] in subrepo source'))
            kind, src = src.split(']', 1)
            kind = kind[1:]
            src = src.lstrip() # strip any extra whitespace after ']'

        if not util.url(src).isabs():
            parent = _abssource(repo, abort=False)
            if parent:
                parent = util.url(parent)
                parent.path = posixpath.join(parent.path or '', src)
                parent.path = posixpath.normpath(parent.path)
                joined = str(parent)
                # Remap the full joined path and use it if it changes,
                # else remap the original source.
                remapped = remap(joined)
                if remapped == joined:
                    src = remap(src)
                else:
                    src = remapped

        src = remap(src)
        state[util.pconvert(path)] = (src.strip(), rev.get(path, ''), kind)

    return state
169 169
def writestate(repo, state):
    """rewrite .hgsubstate in (outer) repo with these subrepo states"""
    # one "<revision> <path>" line per subrepo, sorted by path; entries
    # still at the null revision are omitted
    lines = []
    for path in sorted(state):
        revision = state[path][1]
        if revision != nullstate[1]:
            lines.append('%s %s\n' % (revision, path))
    repo.wwrite('.hgsubstate', ''.join(lines), '')
175 175
def submerge(repo, wctx, mctx, actx, overwrite):
    """delegated from merge.applyupdates: merging of .hgsubstate file
    in working context, merging context and ancestor context"""
    if mctx == actx: # backwards?
        actx = wctx.p1()
    # s1/s2/sa: substate dicts of local, other and ancestor contexts
    s1 = wctx.substate
    s2 = mctx.substate
    sa = actx.substate
    # sm: the merged substate being built
    sm = {}

    repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx))

    def debug(s, msg, r=""):
        if r:
            r = "%s:%s:%s" % r
        repo.ui.debug(" subrepo %s: %s %s\n" % (s, msg, r))

    # first pass: subrepos present on the local side
    for s, l in sorted(s1.iteritems()):
        a = sa.get(s, nullstate)
        ld = l # local state with possible dirty flag for compares
        if wctx.sub(s).dirty():
            ld = (l[0], l[1] + "+")
        if wctx == actx: # overwrite
            a = ld

        if s in s2:
            r = s2[s]
            if ld == r or r == a: # no change or local is newer
                sm[s] = l
                continue
            elif ld == a: # other side changed
                debug(s, "other changed, get", r)
                wctx.sub(s).get(r, overwrite)
                sm[s] = r
            elif ld[0] != r[0]: # sources differ
                if repo.ui.promptchoice(
                    _(' subrepository sources for %s differ\n'
                      'use (l)ocal source (%s) or (r)emote source (%s)?'
                      '$$ &Local $$ &Remote') % (s, l[0], r[0]), 0):
                    debug(s, "prompt changed, get", r)
                    wctx.sub(s).get(r, overwrite)
                    sm[s] = r
            elif ld[1] == a[1]: # local side is unchanged
                debug(s, "other side changed, get", r)
                wctx.sub(s).get(r, overwrite)
                sm[s] = r
            else:
                # both sides changed revisions: ask to merge/keep/take
                debug(s, "both sides changed")
                srepo = wctx.sub(s)
                option = repo.ui.promptchoice(
                    _(' subrepository %s diverged (local revision: %s, '
                      'remote revision: %s)\n'
                      '(M)erge, keep (l)ocal or keep (r)emote?'
                      '$$ &Merge $$ &Local $$ &Remote')
                    % (s, srepo.shortid(l[1]), srepo.shortid(r[1])), 0)
                if option == 0:
                    wctx.sub(s).merge(r)
                    sm[s] = l
                    debug(s, "merge with", r)
                elif option == 1:
                    sm[s] = l
                    debug(s, "keep local subrepo revision", l)
                else:
                    wctx.sub(s).get(r, overwrite)
                    sm[s] = r
                    debug(s, "get remote subrepo revision", r)
        elif ld == a: # remote removed, local unchanged
            debug(s, "remote removed, remove")
            wctx.sub(s).remove()
        elif a == nullstate: # not present in remote or ancestor
            debug(s, "local added, keep")
            sm[s] = l
            continue
        else:
            # local changed a subrepo that the remote removed: prompt
            if repo.ui.promptchoice(
                _(' local changed subrepository %s which remote removed\n'
                  'use (c)hanged version or (d)elete?'
                  '$$ &Changed $$ &Delete') % s, 0):
                debug(s, "prompt remove")
                wctx.sub(s).remove()

    # second pass: subrepos only present on the remote side
    for s, r in sorted(s2.items()):
        if s in s1:
            continue
        elif s not in sa:
            debug(s, "remote added, get", r)
            mctx.sub(s).get(r)
            sm[s] = r
        elif r != sa[s]:
            # remote changed a subrepo that we removed locally: prompt
            if repo.ui.promptchoice(
                _(' remote changed subrepository %s which local removed\n'
                  'use (c)hanged version or (d)elete?'
                  '$$ &Changed $$ &Delete') % s, 0) == 0:
                debug(s, "prompt recreate", r)
                mctx.sub(s).get(r)
                sm[s] = r

    # record merged .hgsubstate
    writestate(repo, sm)
    return sm
276 276
def _updateprompt(ui, sub, dirty, local, remote):
    """Ask the user to pick local vs remote subrepo source; 0 means local."""
    # the wording differs depending on whether the subrepo has local changes
    if dirty:
        template = _(' subrepository sources for %s differ\n'
                     'use (l)ocal source (%s) or (r)emote source (%s)?'
                     '$$ &Local $$ &Remote')
    else:
        template = _(' subrepository sources for %s differ (in checked out '
                     'version)\n'
                     'use (l)ocal source (%s) or (r)emote source (%s)?'
                     '$$ &Local $$ &Remote')
    msg = template % (subrelpath(sub), local, remote)
    return ui.promptchoice(msg, 0)
290 290
def reporelpath(repo):
    """return path to this (sub)repo as seen from outermost repo"""
    # walk the _subparent chain up to the outermost repository
    outermost = repo
    while util.safehasattr(outermost, '_subparent'):
        outermost = outermost._subparent
    prefixlen = len(pathutil.normasprefix(outermost.root))
    return repo.root[prefixlen:]
297 297
def subrelpath(sub):
    """return path to this subrepo as seen from outermost repo"""
    # delegate to the subrepo object's cached _relpath property
    return sub._relpath
301 301
def _abssource(repo, push=False, abort=True):
    """return pull/push path of repo - either based on parent repo .hgsub info
    or on the top repo config. Abort or return None if no source found."""
    if util.safehasattr(repo, '_subparent'):
        # this is a subrepo: join its .hgsub source onto the parent's source
        source = util.url(repo._subsource)
        if source.isabs():
            return str(source)
        source.path = posixpath.normpath(source.path)
        # recurse towards the top repo; don't abort mid-chain
        parent = _abssource(repo._subparent, push, abort=False)
        if parent:
            parent = util.url(util.pconvert(parent))
            parent.path = posixpath.join(parent.path or '', source.path)
            parent.path = posixpath.normpath(parent.path)
            return str(parent)
    else: # recursion reached top repo
        # precedence: explicit top path, default-push (for push), default,
        # then the shared source directory
        if util.safehasattr(repo, '_subtoppath'):
            return repo._subtoppath
        if push and repo.ui.config('paths', 'default-push'):
            return repo.ui.config('paths', 'default-push')
        if repo.ui.config('paths', 'default'):
            return repo.ui.config('paths', 'default')
        if repo.shared():
            # chop off the .hg component to get the default path form
            return os.path.dirname(repo.sharedpath)
    if abort:
        raise error.Abort(_("default path for subrepository not found"))
328 328
def _sanitize(ui, vfs, ignore):
    """Remove potentially hostile 'hgrc' files from non-hg metadata dirs.

    Walks the subrepo working copy; any directory named ``ignore`` (the
    subrepo type's own metadata dir, lowercased) is pruned from the walk,
    and 'hgrc' files found inside '.hg' directories are deleted.
    """
    for dirname, dirs, names in vfs.walk():
        for i, d in enumerate(dirs):
            if d.lower() == ignore:
                # prune in place so vfs.walk doesn't descend into it
                del dirs[i]
                break
        if vfs.basename(dirname).lower() != '.hg':
            continue
        for f in names:
            if f.lower() == 'hgrc':
                ui.warn(_("warning: removing potentially hostile 'hgrc' "
                          "in '%s'\n") % vfs.join(dirname))
                vfs.unlink(vfs.reljoin(dirname, f))
342 342
def subrepo(ctx, path, allowwdir=False, allowcreate=True):
    """return instance of the right subrepo class for subrepo in path"""
    # subrepo inherently violates our import layering rules
    # because it wants to make repo objects from deep inside the stack
    # so we manually delay the circular imports to not break
    # scripts that don't use our demand-loading
    global hg
    from . import hg as h
    hg = h

    # reject paths escaping the parent repo (e.g. symlinks, '..')
    pathutil.pathauditor(ctx.repo().root)(path)
    state = ctx.substate[path]
    if state[2] not in types:
        raise error.Abort(_('unknown subrepo type %s') % state[2])
    if allowwdir:
        # substitute the working directory revision for the recorded one
        state = (state[0], ctx.subrev(path), state[2])
    # allowcreate=False lets callers (e.g. verify) refuse to initialize
    # a subrepo that is missing on disk
    return types[state[2]](ctx, path, state[:2], allowcreate)
360 360
def nullsubrepo(ctx, path, pctx):
    """return an empty subrepo in pctx for the extant subrepo in ctx"""
    # subrepo inherently violates our import layering rules
    # because it wants to make repo objects from deep inside the stack
    # so we manually delay the circular imports to not break
    # scripts that don't use our demand-loading
    global hg
    from . import hg as h
    hg = h

    # reject paths escaping the parent repo (e.g. symlinks, '..')
    pathutil.pathauditor(ctx.repo().root)(path)
    state = ctx.substate[path]
    if state[2] not in types:
        raise error.Abort(_('unknown subrepo type %s') % state[2])
    subrev = ''
    if state[2] == 'hg':
        # the null revision for an hg subrepo is forty zeros
        subrev = "0" * 40
    return types[state[2]](pctx, path, (state[0], subrev), True)
379 379
def newcommitphase(ui, ctx):
    """Return the phase for a new commit of ctx, honoring subrepo phases.

    Depending on phases.checksubrepos ('ignore', 'follow' or 'abort'),
    the commit phase may be raised to the highest phase found among the
    subrepo revisions, or the commit aborted.
    """
    commitphase = phases.newcommitphase(ui)
    substate = getattr(ctx, "substate", None)
    if not substate:
        return commitphase
    check = ui.config('phases', 'checksubrepos', 'follow')
    if check not in ('ignore', 'follow', 'abort'):
        raise error.Abort(_('invalid phases.checksubrepos configuration: %s')
                          % (check))
    if check == 'ignore':
        return commitphase
    # find the highest (most private) phase among the subrepo revisions
    maxphase, maxsub = phases.public, None
    for subpath in sorted(substate):
        subphase = ctx.sub(subpath).phase(substate[subpath][1])
        if maxphase < subphase:
            maxphase, maxsub = subphase, subpath
    if commitphase < maxphase:
        if check == 'abort':
            raise error.Abort(_("can't commit in %s phase"
                                " conflicting %s from subrepository %s") %
                              (phases.phasenames[commitphase],
                               phases.phasenames[maxphase], maxsub))
        ui.warn(_("warning: changes are committed in"
                  " %s phase from subrepository %s\n") %
                (phases.phasenames[maxphase], maxsub))
        return maxphase
    return commitphase
410 410
411 411 # subrepo classes need to implement the following abstract class:
412 412
413 413 class abstractsubrepo(object):
414 414
415 415 def __init__(self, ctx, path):
416 416 """Initialize abstractsubrepo part
417 417
418 418 ``ctx`` is the context referring this subrepository in the
419 419 parent repository.
420 420
421 421 ``path`` is the path to this subrepository as seen from
422 422 innermost repository.
423 423 """
424 424 self.ui = ctx.repo().ui
425 425 self._ctx = ctx
426 426 self._path = path
427 427
428 428 def storeclean(self, path):
429 429 """
430 430 returns true if the repository has not changed since it was last
431 431 cloned from or pushed to a given repository.
432 432 """
433 433 return False
434 434
435 435 def dirty(self, ignoreupdate=False):
436 436 """returns true if the dirstate of the subrepo is dirty or does not
437 437 match current stored state. If ignoreupdate is true, only check
438 438 whether the subrepo has uncommitted changes in its dirstate.
439 439 """
440 440 raise NotImplementedError
441 441
442 442 def dirtyreason(self, ignoreupdate=False):
443 443 """return reason string if it is ``dirty()``
444 444
445 445 Returned string should have enough information for the message
446 446 of exception.
447 447
448 448 This returns None, otherwise.
449 449 """
450 450 if self.dirty(ignoreupdate=ignoreupdate):
451 451 return _("uncommitted changes in subrepository '%s'"
452 452 ) % subrelpath(self)
453 453
454 454 def bailifchanged(self, ignoreupdate=False):
455 455 """raise Abort if subrepository is ``dirty()``
456 456 """
457 457 dirtyreason = self.dirtyreason(ignoreupdate=ignoreupdate)
458 458 if dirtyreason:
459 459 raise error.Abort(dirtyreason)
460 460
461 461 def basestate(self):
462 462 """current working directory base state, disregarding .hgsubstate
463 463 state and working directory modifications"""
464 464 raise NotImplementedError
465 465
466 466 def checknested(self, path):
467 467 """check if path is a subrepository within this repository"""
468 468 return False
469 469
470 470 def commit(self, text, user, date):
471 471 """commit the current changes to the subrepo with the given
472 472 log message. Use given user and date if possible. Return the
473 473 new state of the subrepo.
474 474 """
475 475 raise NotImplementedError
476 476
477 477 def phase(self, state):
478 478 """returns phase of specified state in the subrepository.
479 479 """
480 480 return phases.public
481 481
482 482 def remove(self):
483 483 """remove the subrepo
484 484
485 485 (should verify the dirstate is not dirty first)
486 486 """
487 487 raise NotImplementedError
488 488
489 489 def get(self, state, overwrite=False):
490 490 """run whatever commands are needed to put the subrepo into
491 491 this state
492 492 """
493 493 raise NotImplementedError
494 494
495 495 def merge(self, state):
496 496 """merge currently-saved state with the new state."""
497 497 raise NotImplementedError
498 498
499 499 def push(self, opts):
500 500 """perform whatever action is analogous to 'hg push'
501 501
502 502 This may be a no-op on some systems.
503 503 """
504 504 raise NotImplementedError
505 505
506 506 def add(self, ui, match, prefix, explicitonly, **opts):
507 507 return []
508 508
509 509 def addremove(self, matcher, prefix, opts, dry_run, similarity):
510 510 self.ui.warn("%s: %s" % (prefix, _("addremove is not supported")))
511 511 return 1
512 512
513 513 def cat(self, match, prefix, **opts):
514 514 return 1
515 515
516 516 def status(self, rev2, **opts):
517 517 return scmutil.status([], [], [], [], [], [], [])
518 518
519 519 def diff(self, ui, diffopts, node2, match, prefix, **opts):
520 520 pass
521 521
522 522 def outgoing(self, ui, dest, opts):
523 523 return 1
524 524
525 525 def incoming(self, ui, source, opts):
526 526 return 1
527 527
528 528 def files(self):
529 529 """return filename iterator"""
530 530 raise NotImplementedError
531 531
532 532 def filedata(self, name):
533 533 """return file data"""
534 534 raise NotImplementedError
535 535
536 536 def fileflags(self, name):
537 537 """return file flags"""
538 538 return ''
539 539
540 540 def getfileset(self, expr):
541 541 """Resolve the fileset expression for this repo"""
542 542 return set()
543 543
544 544 def printfiles(self, ui, m, fm, fmt, subrepos):
545 545 """handle the files command for this subrepo"""
546 546 return 1
547 547
548 548 def archive(self, archiver, prefix, match=None):
549 549 if match is not None:
550 550 files = [f for f in self.files() if match(f)]
551 551 else:
552 552 files = self.files()
553 553 total = len(files)
554 554 relpath = subrelpath(self)
555 555 self.ui.progress(_('archiving (%s)') % relpath, 0,
556 556 unit=_('files'), total=total)
557 557 for i, name in enumerate(files):
558 558 flags = self.fileflags(name)
559 559 mode = 'x' in flags and 0o755 or 0o644
560 560 symlink = 'l' in flags
561 561 archiver.addfile(prefix + self._path + '/' + name,
562 562 mode, symlink, self.filedata(name))
563 563 self.ui.progress(_('archiving (%s)') % relpath, i + 1,
564 564 unit=_('files'), total=total)
565 565 self.ui.progress(_('archiving (%s)') % relpath, None)
566 566 return total
567 567
568 568 def walk(self, match):
569 569 '''
570 570 walk recursively through the directory tree, finding all files
571 571 matched by the match function
572 572 '''
573 573 pass
574 574
575 575 def forget(self, match, prefix):
576 576 return ([], [])
577 577
    def removefiles(self, matcher, prefix, after, force, subrepos, warnings):
        """remove the matched files from the subrepository and the filesystem,
        possibly by force and/or after the file has been removed from the
        filesystem. Return 0 on success, 1 on any warning.
        """
        # base class: record a warning and report failure
        warnings.append(_("warning: removefiles not implemented (%s)")
                        % self._path)
        return 1
586 586
    def revert(self, substate, *pats, **opts):
        """base implementation: warn that revert is unsupported"""
        self.ui.warn('%s: reverting %s subrepos is unsupported\n' \
                     % (substate[0], substate[2]))
        return []
591 591
    def shortid(self, revid):
        """return a short form of revid; base class returns it unchanged"""
        return revid
594 594
    def verify(self):
        '''verify the integrity of the repository.  Return 0 on success or
        warning, 1 on any error.

        Base class performs no checks and always succeeds.
        '''
        return 0
600 600
    @propertycache
    def wvfs(self):
        """return vfs to access the working directory of this subrepository

        Cached by propertycache; computed once per instance.
        """
        return scmutil.vfs(self._ctx.repo().wvfs.join(self._path))
606 606
    @propertycache
    def _relpath(self):
        """return path to this subrepository as seen from outermost repository
        """
        return self.wvfs.reljoin(reporelpath(self._ctx.repo()), self._path)
612 612
class hgsubrepo(abstractsubrepo):
    """A subrepository that is itself a Mercurial repository."""

    def __init__(self, ctx, path, state, allowcreate):
        super(hgsubrepo, self).__init__(ctx, path)
        self._state = state
        r = ctx.repo()
        root = r.wjoin(path)
        # only initialize a missing subrepo when the caller explicitly
        # allows it (issue5128: e.g. verify must not create one)
        create = allowcreate and not r.wvfs.exists('%s/.hg' % path)
        self._repo = hg.repository(r.baseui, root, create=create)

        # Propagate the parent's --hidden option
        if r is r.unfiltered():
            self._repo = self._repo.unfiltered()

        self.ui = self._repo.ui
        # propagate selected parent config values into the subrepo ui
        for s, k in [('ui', 'commitsubrepos')]:
            v = r.ui.config(s, k)
            if v:
                self.ui.setconfig(s, k, v, 'subrepo')
        # internal config: ui._usedassubrepo
        self.ui.setconfig('ui', '_usedassubrepo', 'True', 'subrepo')
        self._initrepo(r, state[0], create)

    def storeclean(self, path):
        # True if the store is unchanged versus the last push/pull to path
        with self._repo.lock():
            return self._storeclean(path)

    def _storeclean(self, path):
        # compare the cached store hash against a freshly computed one;
        # any difference (including entry count) means the store changed
        clean = True
        itercache = self._calcstorehash(path)
        for filehash in self._readstorehashcache(path):
            if filehash != next(itercache, None):
                clean = False
                break
        if clean:
            # if not empty:
            # the cached and current pull states have a different size
            clean = next(itercache, None) is None
        return clean

    def _calcstorehash(self, remotepath):
        '''calculate a unique "store hash"

        This method is used to detect when there are changes that may
        require a push to a given remote path.'''
        # sort the files that will be hashed in increasing (likely) file size
        filelist = ('bookmarks', 'store/phaseroots', 'store/00changelog.i')
        yield '# %s\n' % _expandedabspath(remotepath)
        vfs = self._repo.vfs
        for relname in filelist:
            filehash = util.sha1(vfs.tryread(relname)).hexdigest()
            yield '%s = %s\n' % (relname, filehash)

    @propertycache
    def _cachestorehashvfs(self):
        # vfs rooted at the subrepo's .hg/cache/storehash directory
        return scmutil.vfs(self._repo.join('cache/storehash'))

    def _readstorehashcache(self, remotepath):
        '''read the store hash cache for a given remote repository'''
        cachefile = _getstorehashcachename(remotepath)
        return self._cachestorehashvfs.tryreadlines(cachefile, 'r')

    def _cachestorehash(self, remotepath):
        '''cache the current store hash

        Each remote repo requires its own store hash cache, because a subrepo
        store may be "clean" versus a given remote repo, but not versus another
        '''
        cachefile = _getstorehashcachename(remotepath)
        with self._repo.lock():
            storehash = list(self._calcstorehash(remotepath))
            vfs = self._cachestorehashvfs
            vfs.writelines(cachefile, storehash, mode='w', notindexed=True)

    def _getctx(self):
        '''fetch the context for this subrepo revision, possibly a workingctx
        '''
        if self._ctx.rev() is None:
            return self._repo[None] # workingctx if parent is workingctx
        else:
            rev = self._state[1]
            return self._repo[rev]

    @annotatesubrepoerror
    def _initrepo(self, parentrepo, source, create):
        # link the subrepo back to its parent and record its source URL
        self._repo._subparent = parentrepo
        self._repo._subsource = source

        if create:
            # write an hgrc with default/default-push paths for the new repo
            lines = ['[paths]\n']

            def addpathconfig(key, value):
                if value:
                    lines.append('%s = %s\n' % (key, value))
                    self.ui.setconfig('paths', key, value, 'subrepo')

            defpath = _abssource(self._repo, abort=False)
            defpushpath = _abssource(self._repo, True, abort=False)
            addpathconfig('default', defpath)
            if defpath != defpushpath:
                addpathconfig('default-push', defpushpath)

            fp = self._repo.vfs("hgrc", "w", text=True)
            try:
                fp.write(''.join(lines))
            finally:
                fp.close()

    @annotatesubrepoerror
    def add(self, ui, match, prefix, explicitonly, **opts):
        return cmdutil.add(ui, self._repo, match,
                           self.wvfs.reljoin(prefix, self._path),
                           explicitonly, **opts)

    @annotatesubrepoerror
    def addremove(self, m, prefix, opts, dry_run, similarity):
        # In the same way as sub directories are processed, once in a subrepo,
        # always entry any of its subrepos. Don't corrupt the options that will
        # be used to process sibling subrepos however.
        opts = copy.copy(opts)
        opts['subrepos'] = True
        return scmutil.addremove(self._repo, m,
                                 self.wvfs.reljoin(prefix, self._path), opts,
                                 dry_run, similarity)

    @annotatesubrepoerror
    def cat(self, match, prefix, **opts):
        rev = self._state[1]
        ctx = self._repo[rev]
        return cmdutil.cat(self.ui, self._repo, ctx, match, prefix, **opts)

    @annotatesubrepoerror
    def status(self, rev2, **opts):
        try:
            rev1 = self._state[1]
            ctx1 = self._repo[rev1]
            ctx2 = self._repo[rev2]
            return self._repo.status(ctx1, ctx2, **opts)
        except error.RepoLookupError as inst:
            # missing revisions are reported, not fatal
            self.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
                         % (inst, subrelpath(self)))
            return scmutil.status([], [], [], [], [], [], [])

    @annotatesubrepoerror
    def diff(self, ui, diffopts, node2, match, prefix, **opts):
        try:
            node1 = node.bin(self._state[1])
            # We currently expect node2 to come from substate and be
            # in hex format
            if node2 is not None:
                node2 = node.bin(node2)
            cmdutil.diffordiffstat(ui, self._repo, diffopts,
                                   node1, node2, match,
                                   prefix=posixpath.join(prefix, self._path),
                                   listsubrepos=True, **opts)
        except error.RepoLookupError as inst:
            self.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
                         % (inst, subrelpath(self)))

    @annotatesubrepoerror
    def archive(self, archiver, prefix, match=None):
        # make sure the target revision is available locally, then archive
        # this repo's files and recurse into nested subrepos
        self._get(self._state + ('hg',))
        total = abstractsubrepo.archive(self, archiver, prefix, match)
        rev = self._state[1]
        ctx = self._repo[rev]
        for subpath in ctx.substate:
            s = subrepo(ctx, subpath, True)
            submatch = matchmod.subdirmatcher(subpath, match)
            total += s.archive(archiver, prefix + self._path + '/', submatch)
        return total

    @annotatesubrepoerror
    def dirty(self, ignoreupdate=False):
        r = self._state[1]
        if r == '' and not ignoreupdate: # no state recorded
            return True
        w = self._repo[None]
        if r != w.p1().hex() and not ignoreupdate:
            # different version checked out
            return True
        return w.dirty() # working directory changed

    def basestate(self):
        # hex of the working directory's first parent
        return self._repo['.'].hex()

    def checknested(self, path):
        return self._repo._checknested(self._repo.wjoin(path))

    @annotatesubrepoerror
    def commit(self, text, user, date):
        # don't bother committing in the subrepo if it's only been
        # updated
        if not self.dirty(True):
            return self._repo['.'].hex()
        self.ui.debug("committing subrepo %s\n" % subrelpath(self))
        n = self._repo.commit(text, user, date)
        if not n:
            return self._repo['.'].hex() # different version checked out
        return node.hex(n)

    @annotatesubrepoerror
    def phase(self, state):
        return self._repo[state].phase()

    @annotatesubrepoerror
    def remove(self):
        # we can't fully delete the repository as it may contain
        # local-only history
        self.ui.note(_('removing subrepo %s\n') % subrelpath(self))
        hg.clean(self._repo, node.nullid, False)

    def _get(self, state):
        # Make ``state``'s revision available locally, cloning or pulling
        # from the source as needed.  Returns True if the revision was
        # already present, False if it had to be fetched.
        source, revision, kind = state
        if revision in self._repo.unfiltered():
            return True
        self._repo._subsource = source
        srcurl = _abssource(self._repo)
        other = hg.peer(self._repo, {}, srcurl)
        if len(self._repo) == 0:
            self.ui.status(_('cloning subrepo %s from %s\n')
                           % (subrelpath(self), srcurl))
            parentrepo = self._repo._subparent
            # use self._repo.vfs instead of self.wvfs to remove .hg only
            self._repo.vfs.rmtree()
            other, cloned = hg.clone(self._repo._subparent.baseui, {},
                                     other, self._repo.root,
                                     update=False)
            self._repo = cloned.local()
            self._initrepo(parentrepo, source, create=True)
            self._cachestorehash(srcurl)
        else:
            self.ui.status(_('pulling subrepo %s from %s\n')
                           % (subrelpath(self), srcurl))
            cleansub = self.storeclean(srcurl)
            exchange.pull(self._repo, other)
            if cleansub:
                # keep the repo clean after pull
                self._cachestorehash(srcurl)
        return False

    @annotatesubrepoerror
    def get(self, state, overwrite=False):
        inrepo = self._get(state)
        source, revision, kind = state
        repo = self._repo
        repo.ui.debug("getting subrepo %s\n" % self._path)
        if inrepo:
            urepo = repo.unfiltered()
            ctx = urepo[revision]
            if ctx.hidden():
                urepo.ui.warn(
                    _('revision %s in subrepo %s is hidden\n') \
                    % (revision[0:12], self._path))
                repo = urepo
        hg.updaterepo(repo, revision, overwrite)

    @annotatesubrepoerror
    def merge(self, state):
        self._get(state)
        cur = self._repo['.']
        dst = self._repo[state[1]]
        anc = dst.ancestor(cur)

        def mergefunc():
            # fast-forward update, no-op, or real merge depending on the
            # ancestry relationship between current and destination
            if anc == cur and dst.branch() == cur.branch():
                self.ui.debug("updating subrepo %s\n" % subrelpath(self))
                hg.update(self._repo, state[1])
            elif anc == dst:
                self.ui.debug("skipping subrepo %s\n" % subrelpath(self))
            else:
                self.ui.debug("merging subrepo %s\n" % subrelpath(self))
                hg.merge(self._repo, state[1], remind=False)

        wctx = self._repo[None]
        if self.dirty():
            if anc != dst:
                # prompt before discarding/merging local changes
                if _updateprompt(self.ui, self, wctx.dirty(), cur, dst):
                    mergefunc()
            else:
                mergefunc()
        else:
            mergefunc()

    @annotatesubrepoerror
    def push(self, opts):
        force = opts.get('force')
        newbranch = opts.get('new_branch')
        ssh = opts.get('ssh')

        # push subrepos depth-first for coherent ordering
        c = self._repo['']
        subs = c.substate # only repos that are committed
        for s in sorted(subs):
            if c.sub(s).push(opts) == 0:
                return False

        dsturl = _abssource(self._repo, True)
        if not force:
            if self.storeclean(dsturl):
                self.ui.status(
                    _('no changes made to subrepo %s since last push to %s\n')
                    % (subrelpath(self), dsturl))
                return None
        self.ui.status(_('pushing subrepo %s to %s\n') %
                       (subrelpath(self), dsturl))
        other = hg.peer(self._repo, {'ssh': ssh}, dsturl)
        res = exchange.push(self._repo, other, force, newbranch=newbranch)

        # the repo is now clean
        self._cachestorehash(dsturl)
        return res.cgresult

    @annotatesubrepoerror
    def outgoing(self, ui, dest, opts):
        # revs/branches of the parent repo make no sense in the subrepo
        if 'rev' in opts or 'branch' in opts:
            opts = copy.copy(opts)
            opts.pop('rev', None)
            opts.pop('branch', None)
        return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts)

    @annotatesubrepoerror
    def incoming(self, ui, source, opts):
        # revs/branches of the parent repo make no sense in the subrepo
        if 'rev' in opts or 'branch' in opts:
            opts = copy.copy(opts)
            opts.pop('rev', None)
            opts.pop('branch', None)
        return hg.incoming(ui, self._repo, _abssource(self._repo, False), opts)

    @annotatesubrepoerror
    def files(self):
        rev = self._state[1]
        ctx = self._repo[rev]
        return ctx.manifest().keys()

    def filedata(self, name):
        rev = self._state[1]
        return self._repo[rev][name].data()

    def fileflags(self, name):
        rev = self._state[1]
        ctx = self._repo[rev]
        return ctx.flags(name)

    @annotatesubrepoerror
    def printfiles(self, ui, m, fm, fmt, subrepos):
        # If the parent context is a workingctx, use the workingctx here for
        # consistency.
        if self._ctx.rev() is None:
            ctx = self._repo[None]
        else:
            rev = self._state[1]
            ctx = self._repo[rev]
        return cmdutil.files(ui, ctx, m, fm, fmt, subrepos)

    @annotatesubrepoerror
    def getfileset(self, expr):
        if self._ctx.rev() is None:
            ctx = self._repo[None]
        else:
            rev = self._state[1]
            ctx = self._repo[rev]

        files = ctx.getfileset(expr)

        # recurse into nested subrepos, prefixing their paths
        for subpath in ctx.substate:
            sub = ctx.sub(subpath)

            try:
                files.extend(subpath + '/' + f for f in sub.getfileset(expr))
            except error.LookupError:
                self.ui.status(_("skipping missing subrepository: %s\n")
                               % self.wvfs.reljoin(reporelpath(self), subpath))
        return files

    def walk(self, match):
        ctx = self._repo[None]
        return ctx.walk(match)

    @annotatesubrepoerror
    def forget(self, match, prefix):
        return cmdutil.forget(self.ui, self._repo, match,
                              self.wvfs.reljoin(prefix, self._path), True)

    @annotatesubrepoerror
    def removefiles(self, matcher, prefix, after, force, subrepos, warnings):
        return cmdutil.remove(self.ui, self._repo, matcher,
                              self.wvfs.reljoin(prefix, self._path),
                              after, force, subrepos)

    @annotatesubrepoerror
    def revert(self, substate, *pats, **opts):
        # reverting a subrepo is a 2 step process:
        # 1. if the no_backup is not set, revert all modified
        #    files inside the subrepo
        # 2. update the subrepo to the revision specified in
        #    the corresponding substate dictionary
        self.ui.status(_('reverting subrepo %s\n') % substate[0])
        if not opts.get('no_backup'):
            # Revert all files on the subrepo, creating backups
            # Note that this will not recursively revert subrepos
            # We could do it if there was a set:subrepos() predicate
            opts = opts.copy()
            opts['date'] = None
            opts['rev'] = substate[1]

            self.filerevert(*pats, **opts)

        # Update the repo to the revision specified in the given substate
        if not opts.get('dry_run'):
            self.get(substate, overwrite=True)

    def filerevert(self, *pats, **opts):
        ctx = self._repo[opts['rev']]
        parents = self._repo.dirstate.parents()
        if opts.get('all'):
            pats = ['set:modified()']
        else:
            pats = []
        cmdutil.revert(self.ui, self._repo, ctx, parents, *pats, **opts)

    def shortid(self, revid):
        return revid[:12]

    def verify(self):
        try:
            rev = self._state[1]
            ctx = self._repo.unfiltered()[rev]
            if ctx.hidden():
                # Since hidden revisions aren't pushed/pulled, it seems worth an
                # explicit warning.
                ui = self._repo.ui
                ui.warn(_("subrepo '%s' is hidden in revision %s\n") %
                        (self._relpath, node.short(self._ctx.node())))
            return 0
        except error.RepoLookupError:
            # A missing subrepo revision may be a case of needing to pull it, so
            # don't treat this as an error.
            self._repo.ui.warn(_("subrepo '%s' not found in revision %s\n") %
                               (self._relpath, node.short(self._ctx.node())))
            return 0

    @propertycache
    def wvfs(self):
        """return own wvfs for efficiency and consistency
        """
        return self._repo.wvfs

    @propertycache
    def _relpath(self):
        """return path to this subrepository as seen from outermost repository
        """
        # Keep consistent dir separators by avoiding vfs.join(self._path)
        return reporelpath(self._repo)
1065 1065
class svnsubrepo(abstractsubrepo):
    """A subrepository backed by a Subversion working copy."""

    def __init__(self, ctx, path, state, allowcreate):
        super(svnsubrepo, self).__init__(ctx, path)
        self._state = state
        # the svn client binary is required for every operation
        self._exe = util.findexe('svn')
        if not self._exe:
            raise error.Abort(_("'svn' executable not found for subrepo '%s'")
                              % self._path)

    def _svncommand(self, commands, filename='', failok=False):
        # Run an svn command and return (stdout, stderr); aborts on a
        # non-zero exit status unless ``failok`` is set.
        cmd = [self._exe]
        extrakw = {}
        if not self.ui.interactive():
            # Making stdin be a pipe should prevent svn from behaving
            # interactively even if we can't pass --non-interactive.
            extrakw['stdin'] = subprocess.PIPE
            # Starting in svn 1.5 --non-interactive is a global flag
            # instead of being per-command, but we need to support 1.4 so
            # we have to be intelligent about what commands take
            # --non-interactive.
            if commands[0] in ('update', 'checkout', 'commit'):
                cmd.append('--non-interactive')
        cmd.extend(commands)
        if filename is not None:
            path = self.wvfs.reljoin(self._ctx.repo().origroot,
                                     self._path, filename)
            cmd.append(path)
        env = dict(os.environ)
        # Avoid localized output, preserve current locale for everything else.
        lc_all = env.get('LC_ALL')
        if lc_all:
            env['LANG'] = lc_all
            del env['LC_ALL']
        env['LC_MESSAGES'] = 'C'
        p = subprocess.Popen(cmd, bufsize=-1, close_fds=util.closefds,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             universal_newlines=True, env=env, **extrakw)
        stdout, stderr = p.communicate()
        stderr = stderr.strip()
        if not failok:
            if p.returncode:
                raise error.Abort(stderr or 'exited with code %d'
                                  % p.returncode)
            if stderr:
                self.ui.warn(stderr + '\n')
        return stdout, stderr

    @propertycache
    def _svnversion(self):
        # (major, minor) of the svn client, parsed from `svn --version`
        output, err = self._svncommand(['--version', '--quiet'], filename=None)
        m = re.search(r'^(\d+)\.(\d+)', output)
        if not m:
            raise error.Abort(_('cannot retrieve svn tool version'))
        return (int(m.group(1)), int(m.group(2)))

    def _wcrevs(self):
        # Get the working directory revision as well as the last
        # commit revision so we can compare the subrepo state with
        # both. We used to store the working directory one.
        output, err = self._svncommand(['info', '--xml'])
        doc = xml.dom.minidom.parseString(output)
        entries = doc.getElementsByTagName('entry')
        lastrev, rev = '0', '0'
        if entries:
            rev = str(entries[0].getAttribute('revision')) or '0'
            commits = entries[0].getElementsByTagName('commit')
            if commits:
                lastrev = str(commits[0].getAttribute('revision')) or '0'
        return (lastrev, rev)

    def _wcrev(self):
        # first element of _wcrevs() (the last-commit revision)
        return self._wcrevs()[0]

    def _wcchanged(self):
        """Return (changes, extchanges, missing) where changes is True
        if the working directory was changed, extchanges is
        True if any of these changes concern an external entry and missing
        is True if any change is a missing entry.
        """
        output, err = self._svncommand(['status', '--xml'])
        externals, changes, missing = [], [], []
        doc = xml.dom.minidom.parseString(output)
        for e in doc.getElementsByTagName('entry'):
            s = e.getElementsByTagName('wc-status')
            if not s:
                continue
            item = s[0].getAttribute('item')
            props = s[0].getAttribute('props')
            path = e.getAttribute('path')
            if item == 'external':
                externals.append(path)
            elif item == 'missing':
                missing.append(path)
            if (item not in ('', 'normal', 'unversioned', 'external')
                or props not in ('', 'none', 'normal')):
                changes.append(path)
        # a change inside an external counts as an external change
        for path in changes:
            for ext in externals:
                if path == ext or path.startswith(ext + os.sep):
                    return True, True, bool(missing)
        return bool(changes), False, bool(missing)

    def dirty(self, ignoreupdate=False):
        # dirty when files changed, or (unless ignoreupdate) when a
        # revision other than the recorded one is checked out
        if not self._wcchanged()[0]:
            if self._state[1] in self._wcrevs() or ignoreupdate:
                return False
        return True

    def basestate(self):
        lastrev, rev = self._wcrevs()
        if lastrev != rev:
            # Last committed rev is not the same than rev. We would
            # like to take lastrev but we do not know if the subrepo
            # URL exists at lastrev. Test it and fallback to rev it
            # is not there.
            try:
                self._svncommand(['list', '%s@%s' % (self._state[0], lastrev)])
                return lastrev
            except error.Abort:
                pass
        return rev

    @annotatesubrepoerror
    def commit(self, text, user, date):
        # user and date are out of our hands since svn is centralized
        changed, extchanged, missing = self._wcchanged()
        if not changed:
            return self.basestate()
        if extchanged:
            # Do not try to commit externals
            raise error.Abort(_('cannot commit svn externals'))
        if missing:
            # svn can commit with missing entries but aborting like hg
            # seems a better approach.
            raise error.Abort(_('cannot commit missing svn entries'))
        commitinfo, err = self._svncommand(['commit', '-m', text])
        self.ui.status(commitinfo)
        newrev = re.search('Committed revision ([0-9]+).', commitinfo)
        if not newrev:
            if not commitinfo.strip():
                # Sometimes, our definition of "changed" differs from
                # svn one. For instance, svn ignores missing files
                # when committing. If there are only missing files, no
                # commit is made, no output and no error code.
                raise error.Abort(_('failed to commit svn changes'))
            raise error.Abort(commitinfo.splitlines()[-1])
        newrev = newrev.groups()[0]
        self.ui.status(self._svncommand(['update', '-r', newrev])[0])
        return newrev

    @annotatesubrepoerror
    def remove(self):
        if self.dirty():
            self.ui.warn(_('not removing repo %s because '
                           'it has changes.\n') % self._path)
            return
        self.ui.note(_('removing subrepo %s\n') % self._path)

        self.wvfs.rmtree(forcibly=True)
        try:
            # clean up now-empty parent directories
            pwvfs = self._ctx.repo().wvfs
            pwvfs.removedirs(pwvfs.dirname(self._path))
        except OSError:
            pass

    @annotatesubrepoerror
    def get(self, state, overwrite=False):
        if overwrite:
            self._svncommand(['revert', '--recursive'])
        args = ['checkout']
        if self._svnversion >= (1, 5):
            args.append('--force')
        # The revision must be specified at the end of the URL to properly
        # update to a directory which has since been deleted and recreated.
        args.append('%s@%s' % (state[0], state[1]))
        status, err = self._svncommand(args, failok=True)
        _sanitize(self.ui, self.wvfs, '.svn')
        if not re.search('Checked out revision [0-9]+.', status):
            if ('is already a working copy for a different URL' in err
                and (self._wcchanged()[:2] == (False, False))):
                # obstructed but clean working copy, so just blow it away.
                self.remove()
                self.get(state, overwrite=False)
                return
            raise error.Abort((status or err).splitlines()[-1])
        self.ui.status(status)

    @annotatesubrepoerror
    def merge(self, state):
        old = self._state[1]
        new = state[1]
        wcrev = self._wcrev()
        if new != wcrev:
            dirty = old == wcrev or self._wcchanged()[0]
            if _updateprompt(self.ui, self, dirty, wcrev, new):
                self.get(state, False)

    def push(self, opts):
        # push is a no-op for SVN
        return True

    @annotatesubrepoerror
    def files(self):
        output = self._svncommand(['list', '--recursive', '--xml'])[0]
        doc = xml.dom.minidom.parseString(output)
        paths = []
        for e in doc.getElementsByTagName('entry'):
            kind = str(e.getAttribute('kind'))
            if kind != 'file':
                continue
            name = ''.join(c.data for c
                           in e.getElementsByTagName('name')[0].childNodes
                           if c.nodeType == c.TEXT_NODE)
            paths.append(name.encode('utf-8'))
        return paths

    def filedata(self, name):
        return self._svncommand(['cat'], name)[0]
1284 1284
1285 1285
1286 1286 class gitsubrepo(abstractsubrepo):
    def __init__(self, ctx, path, state, allowcreate):
        # ``allowcreate`` is accepted for interface parity with the other
        # subrepo classes; it is not used by this constructor.
        super(gitsubrepo, self).__init__(ctx, path)
        self._state = state
        self._abspath = ctx.repo().wjoin(path)
        self._subparent = ctx.repo()
        self._ensuregit()
1293 1293
    def _ensuregit(self):
        """locate a usable git executable and check its version

        Tries 'git' first, then 'git.cmd' on Windows.  Aborts when git
        cannot be run or is too old; warns on versions that may not work.
        """
        try:
            self._gitexecutable = 'git'
            out, err = self._gitnodir(['--version'])
        except OSError as e:
            genericerror = _("error executing git for subrepo '%s': %s")
            notfoundhint = _("check git is installed and in your PATH")
            if e.errno != errno.ENOENT:
                raise error.Abort(genericerror % (self._path, e.strerror))
            elif os.name == 'nt':
                # on Windows, retry with the 'git.cmd' wrapper
                try:
                    self._gitexecutable = 'git.cmd'
                    out, err = self._gitnodir(['--version'])
                except OSError as e2:
                    if e2.errno == errno.ENOENT:
                        raise error.Abort(_("couldn't find 'git' or 'git.cmd'"
                                            " for subrepo '%s'") % self._path,
                                          hint=notfoundhint)
                    else:
                        raise error.Abort(genericerror % (self._path,
                                                          e2.strerror))
            else:
                raise error.Abort(_("couldn't find git for subrepo '%s'")
                                  % self._path, hint=notfoundhint)
        versionstatus = self._checkversion(out)
        if versionstatus == 'unknown':
            self.ui.warn(_('cannot retrieve git version\n'))
        elif versionstatus == 'abort':
            raise error.Abort(_('git subrepo requires at least 1.6.0 or later'))
        elif versionstatus == 'warning':
            self.ui.warn(_('git subrepo requires at least 1.6.0 or later\n'))
1325 1325
1326 1326 @staticmethod
1327 1327 def _gitversion(out):
1328 1328 m = re.search(r'^git version (\d+)\.(\d+)\.(\d+)', out)
1329 1329 if m:
1330 1330 return (int(m.group(1)), int(m.group(2)), int(m.group(3)))
1331 1331
1332 1332 m = re.search(r'^git version (\d+)\.(\d+)', out)
1333 1333 if m:
1334 1334 return (int(m.group(1)), int(m.group(2)), 0)
1335 1335
1336 1336 return -1
1337 1337
1338 1338 @staticmethod
1339 1339 def _checkversion(out):
1340 1340 '''ensure git version is new enough
1341 1341
1342 1342 >>> _checkversion = gitsubrepo._checkversion
1343 1343 >>> _checkversion('git version 1.6.0')
1344 1344 'ok'
1345 1345 >>> _checkversion('git version 1.8.5')
1346 1346 'ok'
1347 1347 >>> _checkversion('git version 1.4.0')
1348 1348 'abort'
1349 1349 >>> _checkversion('git version 1.5.0')
1350 1350 'warning'
1351 1351 >>> _checkversion('git version 1.9-rc0')
1352 1352 'ok'
1353 1353 >>> _checkversion('git version 1.9.0.265.g81cdec2')
1354 1354 'ok'
1355 1355 >>> _checkversion('git version 1.9.0.GIT')
1356 1356 'ok'
1357 1357 >>> _checkversion('git version 12345')
1358 1358 'unknown'
1359 1359 >>> _checkversion('no')
1360 1360 'unknown'
1361 1361 '''
1362 1362 version = gitsubrepo._gitversion(out)
1363 1363 # git 1.4.0 can't work at all, but 1.5.X can in at least some cases,
1364 1364 # despite the docstring comment. For now, error on 1.4.0, warn on
1365 1365 # 1.5.0 but attempt to continue.
1366 1366 if version == -1:
1367 1367 return 'unknown'
1368 1368 if version < (1, 5, 0):
1369 1369 return 'abort'
1370 1370 elif version < (1, 6, 0):
1371 1371 return 'warning'
1372 1372 return 'ok'
1373 1373
    def _gitcommand(self, commands, env=None, stream=False):
        # convenience wrapper: run git in the subrepo dir, return stdout only
        return self._gitdir(commands, env=env, stream=stream)[0]
1376 1376
    def _gitdir(self, commands, env=None, stream=False):
        # run git with the subrepo checkout as its working directory
        return self._gitnodir(commands, env=env, stream=stream,
                              cwd=self._abspath)
1380 1380
    def _gitnodir(self, commands, env=None, stream=False, cwd=None):
        """Calls the git command

        The methods tries to call the git command. versions prior to 1.6.0
        are not supported and very probably fail.

        Returns (stdout, returncode); with ``stream=True`` returns the
        child's stdout pipe instead and performs no exit-status checking.
        """
        self.ui.debug('%s: git %s\n' % (self._relpath, ' '.join(commands)))
        if env is None:
            env = os.environ.copy()
        # disable localization for Git output (issue5176)
        env['LC_ALL'] = 'C'
        # fix for Git CVE-2015-7545
        if 'GIT_ALLOW_PROTOCOL' not in env:
            env['GIT_ALLOW_PROTOCOL'] = 'file:git:http:https:ssh'
        # unless ui.quiet is set, print git's stderr,
        # which is mostly progress and useful info
        errpipe = None
        if self.ui.quiet:
            errpipe = open(os.devnull, 'w')
        p = subprocess.Popen([self._gitexecutable] + commands, bufsize=-1,
                             cwd=cwd, env=env, close_fds=util.closefds,
                             stdout=subprocess.PIPE, stderr=errpipe)
        if stream:
            # caller consumes stdout incrementally
            return p.stdout, None

        retdata = p.stdout.read().strip()
        # wait for the child to exit to avoid race condition.
        p.wait()

        if p.returncode != 0 and p.returncode != 1:
            # there are certain error codes that are ok
            command = commands[0]
            if command in ('cat-file', 'symbolic-ref'):
                return retdata, p.returncode
            # for all others, abort
            raise error.Abort('git %s error %d in %s' %
                              (command, p.returncode, self._relpath))

        return retdata, p.returncode
1420 1420
1421 1421 def _gitmissing(self):
1422 1422 return not self.wvfs.exists('.git')
1423 1423
1424 1424 def _gitstate(self):
1425 1425 return self._gitcommand(['rev-parse', 'HEAD'])
1426 1426
1427 1427 def _gitcurrentbranch(self):
1428 1428 current, err = self._gitdir(['symbolic-ref', 'HEAD', '--quiet'])
1429 1429 if err:
1430 1430 current = None
1431 1431 return current
1432 1432
1433 1433 def _gitremote(self, remote):
1434 1434 out = self._gitcommand(['remote', 'show', '-n', remote])
1435 1435 line = out.split('\n')[1]
1436 1436 i = line.index('URL: ') + len('URL: ')
1437 1437 return line[i:]
1438 1438
1439 1439 def _githavelocally(self, revision):
1440 1440 out, code = self._gitdir(['cat-file', '-e', revision])
1441 1441 return code == 0
1442 1442
1443 1443 def _gitisancestor(self, r1, r2):
1444 1444 base = self._gitcommand(['merge-base', r1, r2])
1445 1445 return base == r1
1446 1446
1447 1447 def _gitisbare(self):
1448 1448 return self._gitcommand(['config', '--bool', 'core.bare']) == 'true'
1449 1449
    def _gitupdatestat(self):
        """This must be run before git diff-index.
        diff-index only looks at changes to file stat;
        this command looks at file contents and updates the stat.
        (-q keeps update-index from failing on unmerged entries.)"""
        self._gitcommand(['update-index', '-q', '--refresh'])
1455 1455
1456 1456 def _gitbranchmap(self):
1457 1457 '''returns 2 things:
1458 1458 a map from git branch to revision
1459 1459 a map from revision to branches'''
1460 1460 branch2rev = {}
1461 1461 rev2branch = {}
1462 1462
1463 1463 out = self._gitcommand(['for-each-ref', '--format',
1464 1464 '%(objectname) %(refname)'])
1465 1465 for line in out.split('\n'):
1466 1466 revision, ref = line.split(' ')
1467 1467 if (not ref.startswith('refs/heads/') and
1468 1468 not ref.startswith('refs/remotes/')):
1469 1469 continue
1470 1470 if ref.startswith('refs/remotes/') and ref.endswith('/HEAD'):
1471 1471 continue # ignore remote/HEAD redirects
1472 1472 branch2rev[ref] = revision
1473 1473 rev2branch.setdefault(revision, []).append(ref)
1474 1474 return branch2rev, rev2branch
1475 1475
1476 1476 def _gittracking(self, branches):
1477 1477 'return map of remote branch to local tracking branch'
1478 1478 # assumes no more than one local tracking branch for each remote
1479 1479 tracking = {}
1480 1480 for b in branches:
1481 1481 if b.startswith('refs/remotes/'):
1482 1482 continue
1483 1483 bname = b.split('/', 2)[2]
1484 1484 remote = self._gitcommand(['config', 'branch.%s.remote' % bname])
1485 1485 if remote:
1486 1486 ref = self._gitcommand(['config', 'branch.%s.merge' % bname])
1487 1487 tracking['refs/remotes/%s/%s' %
1488 1488 (remote, ref.split('/', 2)[2])] = b
1489 1489 return tracking
1490 1490
    def _abssource(self, source):
        """Resolve ``source`` to an absolute subrepo source.

        URLs (containing '://') and scp-style paths (host:path) are
        treated as already absolute and returned unchanged; anything else
        is resolved against the parent repo via the module-level
        _abssource() helper, which reads self._subsource.
        """
        if '://' not in source:
            # recognize the scp syntax as an absolute source
            colon = source.find(':')
            if colon != -1 and '/' not in source[:colon]:
                return source
        # stash the raw source where the module-level helper expects it
        self._subsource = source
        return _abssource(self)
1499 1499
1500 1500 def _fetch(self, source, revision):
1501 1501 if self._gitmissing():
1502 1502 source = self._abssource(source)
1503 1503 self.ui.status(_('cloning subrepo %s from %s\n') %
1504 1504 (self._relpath, source))
1505 1505 self._gitnodir(['clone', source, self._abspath])
1506 1506 if self._githavelocally(revision):
1507 1507 return
1508 1508 self.ui.status(_('pulling subrepo %s from %s\n') %
1509 1509 (self._relpath, self._gitremote('origin')))
1510 1510 # try only origin: the originally cloned repo
1511 1511 self._gitcommand(['fetch'])
1512 1512 if not self._githavelocally(revision):
1513 1513 raise error.Abort(_("revision %s does not exist in subrepo %s\n") %
1514 1514 (revision, self._relpath))
1515 1515
    @annotatesubrepoerror
    def dirty(self, ignoreupdate=False):
        """Report whether the subrepo has uncommitted or unsynced changes.

        A missing checkout is dirty only when a revision is recorded in
        state; a bare repo is always dirty.  Unless ``ignoreupdate`` is
        set, a checked-out revision differing from the recorded one also
        counts as dirty.  Untracked files never count.
        """
        if self._gitmissing():
            return self._state[1] != ''
        if self._gitisbare():
            return True
        if not ignoreupdate and self._state[1] != self._gitstate():
            # different version checked out
            return True
        # check for staged changes or modified files; ignore untracked files
        self._gitupdatestat()
        # diff-index --quiet exits 1 when there are differences
        out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
        return code == 1
1529 1529
1530 1530 def basestate(self):
1531 1531 return self._gitstate()
1532 1532
    @annotatesubrepoerror
    def get(self, state, overwrite=False):
        """Update the working copy to the revision recorded in ``state``.

        Fetches the revision if needed, un-bares the repo, then tries to
        check out the most useful ref for it: master first, then any local
        branch, then a tracked remote branch (fast-forwarding the local
        tracking branch when it is downstream), falling back to a detached
        HEAD.  With ``overwrite``, local changes are discarded.
        """
        source, revision, kind = state
        if not revision:
            # empty revision in .hgsubstate means the subrepo was removed
            self.remove()
            return
        self._fetch(source, revision)
        # if the repo was set to be bare, unbare it
        if self._gitisbare():
            self._gitcommand(['config', 'core.bare', 'false'])
            if self._gitstate() == revision:
                self._gitcommand(['reset', '--hard', 'HEAD'])
                return
        elif self._gitstate() == revision:
            if overwrite:
                # first reset the index to unmark new files for commit, because
                # reset --hard will otherwise throw away files added for commit,
                # not just unmark them.
                self._gitcommand(['reset', 'HEAD'])
                self._gitcommand(['reset', '--hard', 'HEAD'])
            return
        branch2rev, rev2branch = self._gitbranchmap()

        def checkout(args):
            # run git checkout, forcing when overwrite is requested
            cmd = ['checkout']
            if overwrite:
                # first reset the index to unmark new files for commit, because
                # the -f option will otherwise throw away files added for
                # commit, not just unmark them.
                self._gitcommand(['reset', 'HEAD'])
                cmd.append('-f')
            self._gitcommand(cmd + args)
            _sanitize(self.ui, self.wvfs, '.git')

        def rawcheckout():
            # no branch to checkout, check it out with no branch
            self.ui.warn(_('checking out detached HEAD in subrepo %s\n') %
                         self._relpath)
            self.ui.warn(_('check out a git branch if you intend '
                           'to make changes\n'))
            checkout(['-q', revision])

        if revision not in rev2branch:
            rawcheckout()
            return
        branches = rev2branch[revision]
        firstlocalbranch = None
        for b in branches:
            if b == 'refs/heads/master':
                # master trumps all other branches
                checkout(['refs/heads/master'])
                return
            if not firstlocalbranch and not b.startswith('refs/remotes/'):
                firstlocalbranch = b
        if firstlocalbranch:
            checkout([firstlocalbranch])
            return

        tracking = self._gittracking(branch2rev.keys())
        # choose a remote branch already tracked if possible
        remote = branches[0]
        if remote not in tracking:
            for b in branches:
                if b in tracking:
                    remote = b
                    break

        if remote not in tracking:
            # create a new local tracking branch
            local = remote.split('/', 3)[3]
            checkout(['-b', local, remote])
        elif self._gitisancestor(branch2rev[tracking[remote]], remote):
            # When updating to a tracked remote branch,
            # if the local tracking branch is downstream of it,
            # a normal `git pull` would have performed a "fast-forward merge"
            # which is equivalent to updating the local branch to the remote.
            # Since we are only looking at branching at update, we need to
            # detect this situation and perform this action lazily.
            if tracking[remote] != self._gitcurrentbranch():
                checkout([tracking[remote]])
            self._gitcommand(['merge', '--ff', remote])
            _sanitize(self.ui, self.wvfs, '.git')
        else:
            # a real merge would be required, just checkout the revision
            rawcheckout()
1618 1618
    @annotatesubrepoerror
    def commit(self, text, user, date):
        """Commit all tracked changes with message ``text`` and return the
        resulting revision hash.

        Raises error.Abort when the subrepo checkout is missing.
        """
        if self._gitmissing():
            raise error.Abort(_("subrepo %s is missing") % self._relpath)
        cmd = ['commit', '-a', '-m', text]
        env = os.environ.copy()
        if user:
            cmd += ['--author', user]
        if date:
            # git's date parser silently ignores when seconds < 1e9
            # convert to ISO8601
            env['GIT_AUTHOR_DATE'] = util.datestr(date,
                                                  '%Y-%m-%dT%H:%M:%S %1%2')
        self._gitcommand(cmd, env=env)
        # make sure commit works otherwise HEAD might not exist under certain
        # circumstances
        return self._gitstate()
1636 1636
    @annotatesubrepoerror
    def merge(self, state):
        """Merge the revision recorded in ``state`` into the working copy.

        Fast-forwards via get() when the target is a descendant; otherwise
        runs 'git merge --no-commit'.  Prompts before touching a dirty
        working copy.
        """
        source, revision, kind = state
        self._fetch(source, revision)
        base = self._gitcommand(['merge-base', revision, self._state[1]])
        self._gitupdatestat()
        out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])

        def mergefunc():
            if base == revision:
                self.get(state) # fast forward merge
            elif base != self._state[1]:
                self._gitcommand(['merge', '--no-commit', revision])
                _sanitize(self.ui, self.wvfs, '.git')

        if self.dirty():
            if self._gitstate() != revision:
                # code != 0 means diff-index reported local modifications
                dirty = self._gitstate() == self._state[1] or code != 0
                if _updateprompt(self.ui, self, dirty,
                                 self._state[1][:7], revision[:7]):
                    mergefunc()
        else:
            mergefunc()
1660 1660
    @annotatesubrepoerror
    def push(self, opts):
        """Push the recorded revision to origin; return True on success or
        when nothing needs pushing, False otherwise.

        ``opts`` is a dict of push options; only 'force' is consulted.
        """
        force = opts.get('force')

        if not self._state[1]:
            # no revision recorded: nothing to push
            return True
        if self._gitmissing():
            raise error.Abort(_("subrepo %s is missing") % self._relpath)
        # if a branch in origin contains the revision, nothing to do
        branch2rev, rev2branch = self._gitbranchmap()
        if self._state[1] in rev2branch:
            for b in rev2branch[self._state[1]]:
                if b.startswith('refs/remotes/origin/'):
                    return True
        for b, revision in branch2rev.iteritems():
            if b.startswith('refs/remotes/origin/'):
                if self._gitisancestor(self._state[1], revision):
                    return True
        # otherwise, try to push the currently checked out branch
        cmd = ['push']
        if force:
            cmd.append('--force')

        current = self._gitcurrentbranch()
        if current:
            # determine if the current branch is even useful
            if not self._gitisancestor(self._state[1], current):
                self.ui.warn(_('unrelated git branch checked out '
                               'in subrepo %s\n') % self._relpath)
                return False
            self.ui.status(_('pushing branch %s of subrepo %s\n') %
                           (current.split('/', 2)[2], self._relpath))
            ret = self._gitdir(cmd + ['origin', current])
            return ret[1] == 0
        else:
            # detached HEAD: there is no branch to push
            self.ui.warn(_('no branch checked out in subrepo %s\n'
                           'cannot push revision %s\n') %
                         (self._relpath, self._state[1]))
            return False
1700 1700
    @annotatesubrepoerror
    def add(self, ui, match, prefix, explicitonly, **opts):
        """Add files matched by ``match`` to git's index.

        Returns the list of rejected files (files explicitly named that
        are already tracked).  Honors the 'dry_run' option.
        """
        if self._gitmissing():
            return []

        (modified, added, removed,
         deleted, unknown, ignored, clean) = self.status(None, unknown=True,
                                                         clean=True)

        tracked = set()
        # dirstates 'amn' warn, 'r' is added again
        for l in (modified, added, deleted, clean):
            tracked.update(l)

        # Unknown files not of interest will be rejected by the matcher
        files = unknown
        files.extend(match.files())

        rejected = []

        files = [f for f in sorted(set(files)) if match(f)]
        for f in files:
            exact = match.exact(f)
            command = ["add"]
            if exact:
                command.append("-f") #should be added, even if ignored
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(f))

            if f in tracked:  # hg prints 'adding' even if already tracked
                if exact:
                    rejected.append(f)
                continue
            if not opts.get('dry_run'):
                self._gitcommand(command + [f])

        for f in rejected:
            ui.warn(_("%s already tracked!\n") % match.abs(f))

        return rejected
1741 1741
    @annotatesubrepoerror
    def remove(self):
        """Empty the subrepo working directory, preserving its history.

        Refuses to act on a dirty checkout.  The repository is made bare
        and every working-copy entry except .git is deleted, so local-only
        commits survive.
        """
        if self._gitmissing():
            return
        if self.dirty():
            self.ui.warn(_('not removing repo %s because '
                           'it has changes.\n') % self._relpath)
            return
        # we can't fully delete the repository as it may contain
        # local-only history
        self.ui.note(_('removing subrepo %s\n') % self._relpath)
        self._gitcommand(['config', 'core.bare', 'true'])
        for f, kind in self.wvfs.readdir():
            if f == '.git':
                continue
            if kind == stat.S_IFDIR:
                self.wvfs.rmtree(f)
            else:
                self.wvfs.unlink(f)
1761 1761
    def archive(self, archiver, prefix, match=None):
        """Add the subrepo's files at the recorded revision to ``archiver``.

        Streams `git archive` output as a tar and copies each regular file
        and symlink (optionally filtered by ``match``) into the archive
        under ``prefix``.  Returns the number of files archived.
        """
        total = 0
        source, revision = self._state
        if not revision:
            return total
        self._fetch(source, revision)

        # Parse git's native archive command.
        # This should be much faster than manually traversing the trees
        # and objects with many subprocess calls.
        tarstream = self._gitcommand(['archive', revision], stream=True)
        tar = tarfile.open(fileobj=tarstream, mode='r|')
        relpath = subrelpath(self)
        self.ui.progress(_('archiving (%s)') % relpath, 0, unit=_('files'))
        for i, info in enumerate(tar):
            if info.isdir():
                continue
            if match and not match(info.name):
                continue
            if info.issym():
                # symlinks carry their target, not file data
                data = info.linkname
            else:
                data = tar.extractfile(info).read()
            archiver.addfile(prefix + self._path + '/' + info.name,
                             info.mode, info.issym(), data)
            total += 1
            self.ui.progress(_('archiving (%s)') % relpath, i + 1,
                             unit=_('files'))
        self.ui.progress(_('archiving (%s)') % relpath, None)
        return total
1792 1792
1793 1793
    @annotatesubrepoerror
    def cat(self, match, prefix, **opts):
        """Write the content of matched files at the recorded revision.

        Returns 0 on success, 1 when the match uses patterns (unsupported)
        or names no files.
        """
        rev = self._state[1]
        if match.anypats():
            return 1 #No support for include/exclude yet

        if not match.files():
            return 1

        for f in match.files():
            output = self._gitcommand(["show", "%s:%s" % (rev, f)])
            fp = cmdutil.makefileobj(self._subparent, opts.get('output'),
                                     self._ctx.node(),
                                     pathname=self.wvfs.reljoin(prefix, f))
            fp.write(output)
            fp.close()
        return 0
1811 1811
1812 1812
    @annotatesubrepoerror
    def status(self, rev2, **opts):
        """Return an scmutil.status comparing the recorded revision to
        ``rev2`` (or to the working directory when rev2 is falsy).

        unknown/ignored/clean lists are only populated when the matching
        option is set in ``opts``.
        """
        rev1 = self._state[1]
        if self._gitmissing() or not rev1:
            # if the repo is missing, return no results
            return scmutil.status([], [], [], [], [], [], [])
        modified, added, removed = [], [], []
        self._gitupdatestat()
        if rev2:
            command = ['diff-tree', '--no-renames', '-r', rev1, rev2]
        else:
            command = ['diff-index', '--no-renames', rev1]
        out = self._gitcommand(command)
        for line in out.split('\n'):
            tab = line.find('\t')
            if tab == -1:
                continue
            # the one-letter status code immediately precedes the tab
            status, f = line[tab - 1], line[tab + 1:]
            if status == 'M':
                modified.append(f)
            elif status == 'A':
                added.append(f)
            elif status == 'D':
                removed.append(f)

        deleted, unknown, ignored, clean = [], [], [], []

        command = ['status', '--porcelain', '-z']
        if opts.get('unknown'):
            command += ['--untracked-files=all']
        if opts.get('ignored'):
            command += ['--ignored']
        out = self._gitcommand(command)

        changedfiles = set()
        changedfiles.update(modified)
        changedfiles.update(added)
        changedfiles.update(removed)
        for line in out.split('\0'):
            if not line:
                continue
            st = line[0:2]
            #moves and copies show 2 files on one line
            # NOTE(review): after splitting on '\0' above, `line` can never
            # contain another '\0', so this branch looks unreachable and a
            # rename's second filename would arrive as a separate entry --
            # verify against `git status --porcelain -z` output format
            if line.find('\0') >= 0:
                filename1, filename2 = line[3:].split('\0')
            else:
                filename1 = line[3:]
                filename2 = None

            changedfiles.add(filename1)
            if filename2:
                changedfiles.add(filename2)

            if st == '??':
                unknown.append(filename1)
            elif st == '!!':
                ignored.append(filename1)

        if opts.get('clean'):
            out = self._gitcommand(['ls-files'])
            for f in out.split('\n'):
                if not f in changedfiles:
                    clean.append(f)

        return scmutil.status(modified, added, removed, deleted,
                              unknown, ignored, clean)
1879 1879
    @annotatesubrepoerror
    def diff(self, ui, diffopts, node2, match, prefix, **opts):
        """Write a diff between the recorded revision and ``node2`` (or the
        working copy) to ``ui``, honoring relevant diffopts.

        File paths in the output are prefixed so they resolve relative to
        the parent repository.
        """
        node1 = self._state[1]
        cmd = ['diff', '--no-renames']
        if opts['stat']:
            cmd.append('--stat')
        else:
            # for Git, this also implies '-p'
            cmd.append('-U%d' % diffopts.context)

        gitprefix = self.wvfs.reljoin(prefix, self._path)

        if diffopts.noprefix:
            cmd.extend(['--src-prefix=%s/' % gitprefix,
                        '--dst-prefix=%s/' % gitprefix])
        else:
            cmd.extend(['--src-prefix=a/%s/' % gitprefix,
                        '--dst-prefix=b/%s/' % gitprefix])

        if diffopts.ignorews:
            cmd.append('--ignore-all-space')
        if diffopts.ignorewsamount:
            cmd.append('--ignore-space-change')
        # --ignore-blank-lines requires git >= 1.8.4
        if self._gitversion(self._gitcommand(['--version'])) >= (1, 8, 4) \
                and diffopts.ignoreblanklines:
            cmd.append('--ignore-blank-lines')

        cmd.append(node1)
        if node2:
            cmd.append(node2)

        output = ""
        if match.always():
            output += self._gitcommand(cmd) + '\n'
        else:
            # diff only the matched files, one git invocation per file
            st = self.status(node2)[:3]
            files = [f for sublist in st for f in sublist]
            for f in files:
                if match(f):
                    output += self._gitcommand(cmd + ['--', f]) + '\n'

        if output.strip():
            ui.write(output)
1923 1923
    @annotatesubrepoerror
    def revert(self, substate, *pats, **opts):
        """Revert the subrepo working copy to the state in ``substate``.

        Unless 'no_backup' is set, modified files are first saved aside
        under their .orig names.  Honors 'dry_run'.  Returns an empty list
        (no files individually reverted).
        """
        self.ui.status(_('reverting subrepo %s\n') % substate[0])
        if not opts.get('no_backup'):
            status = self.status(None)
            names = status.modified
            for name in names:
                bakname = scmutil.origpath(self.ui, self._subparent, name)
                self.ui.note(_('saving current version of %s as %s\n') %
                             (name, bakname))
                self.wvfs.rename(name, bakname)

        if not opts.get('dry_run'):
            self.get(substate, overwrite=True)
        return []
1939 1939
1940 1940 def shortid(self, revid):
1941 1941 return revid[:7]
1942 1942
# map of .hgsub kind keyword -> subrepo implementation class
types = {
    'hg': hgsubrepo,
    'svn': svnsubrepo,
    'git': gitsubrepo,
    }
@@ -1,124 +1,142
1 1 $ hg init repo
2 2 $ cd repo
3 3 $ hg init subrepo
4 4 $ echo a > subrepo/a
5 5 $ hg -R subrepo ci -Am adda
6 6 adding a
7 7 $ echo 'subrepo = subrepo' > .hgsub
8 8 $ hg ci -Am addsubrepo
9 9 adding .hgsub
10 10 $ echo b > subrepo/b
11 11 $ hg -R subrepo ci -Am addb
12 12 adding b
13 13 $ hg ci -m updatedsub
14 14
15 15 ignore blanklines in .hgsubstate
16 16
17 17 >>> file('.hgsubstate', 'wb').write('\n\n \t \n \n')
18 18 $ hg st --subrepos
19 19 M .hgsubstate
20 20 $ hg revert -qC .hgsubstate
21 21
22 22 abort more gracefully on .hgsubstate parsing error
23 23
24 24 $ cp .hgsubstate .hgsubstate.old
25 25 >>> file('.hgsubstate', 'wb').write('\ninvalid')
26 26 $ hg st --subrepos --cwd $TESTTMP -R $TESTTMP/repo
27 27 abort: invalid subrepository revision specifier in 'repo/.hgsubstate' line 2
28 28 [255]
29 29 $ mv .hgsubstate.old .hgsubstate
30 30
31 31 delete .hgsub and revert it
32 32
33 33 $ rm .hgsub
34 34 $ hg revert .hgsub
35 35 warning: subrepo spec file '.hgsub' not found
36 36 warning: subrepo spec file '.hgsub' not found
37 37 warning: subrepo spec file '.hgsub' not found
38 38
39 39 delete .hgsubstate and revert it
40 40
41 41 $ rm .hgsubstate
42 42 $ hg revert .hgsubstate
43 43
44 44 delete .hgsub and update
45 45
46 46 $ rm .hgsub
47 47 $ hg up 0 --cwd $TESTTMP -R $TESTTMP/repo
48 48 warning: subrepo spec file 'repo/.hgsub' not found
49 49 warning: subrepo spec file 'repo/.hgsub' not found
50 50 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
51 51 $ hg st
52 52 warning: subrepo spec file '.hgsub' not found
53 53 ! .hgsub
54 54 $ ls subrepo
55 55 a
56 56
57 57 delete .hgsubstate and update
58 58
59 59 $ hg up -C
60 60 warning: subrepo spec file '.hgsub' not found
61 61 warning: subrepo spec file '.hgsub' not found
62 62 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
63 63 $ rm .hgsubstate
64 64 $ hg up 0
65 65 remote changed .hgsubstate which local deleted
66 66 use (c)hanged version or leave (d)eleted? c
67 67 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
68 68 $ hg st
69 69 $ ls subrepo
70 70 a
71 71
72 72 Enable obsolete
73 73
74 74 $ cat >> $HGRCPATH << EOF
75 75 > [ui]
76 76 > logtemplate= {rev}:{node|short} {desc|firstline}
77 77 > [phases]
78 78 > publish=False
79 79 > [experimental]
80 80 > evolution=createmarkers
81 81 > EOF
82 82
83 83 check that we can update parent repo with missing (amended) subrepo revision
84 84
85 85 $ hg up --repository subrepo -r tip
86 86 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
87 87 $ hg ci -m "updated subrepo to tip"
88 88 created new head
89 89 $ cd subrepo
90 90 $ hg update -r tip
91 91 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
92 92 $ echo foo > a
93 93 $ hg commit --amend -m "addb (amended)"
94 94 $ cd ..
95 95 $ hg update --clean .
96 96 revision 102a90ea7b4a in subrepo subrepo is hidden
97 97 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
98 98
99 99 check that --hidden is propagated to the subrepo
100 100
101 101 $ hg -R subrepo up tip
102 102 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
103 103 $ hg ci -m 'commit with amended subrepo'
104 104 $ echo bar > subrepo/a
105 105 $ hg -R subrepo ci --amend -m "amend a (again)"
106 106 $ hg --hidden cat subrepo/a
107 107 foo
108 108
109 109 verify will warn if locked-in subrepo revisions are hidden or missing
110 110
111 111 $ hg ci -m "amended subrepo (again)"
112 112 $ hg --config extensions.strip= --hidden strip -R subrepo -qr 'tip'
113 113 $ hg verify
114 114 checking changesets
115 115 checking manifests
116 116 crosschecking files in changesets and manifests
117 117 checking files
118 118 2 files, 5 changesets, 5 total revisions
119 119 checking subrepo links
120 120 subrepo 'subrepo' is hidden in revision a66de08943b6
121 121 subrepo 'subrepo' is hidden in revision 674d05939c1e
122 122 subrepo 'subrepo' not found in revision a7d05d9055a4
123 123
124 verifying shouldn't init a new subrepo if the reference doesn't exist
125
126 $ mv subrepo b
127 $ hg verify
128 checking changesets
129 checking manifests
130 crosschecking files in changesets and manifests
131 checking files
132 2 files, 5 changesets, 5 total revisions
133 checking subrepo links
134 0: repository $TESTTMP/repo/subrepo not found (glob)
135 1: repository $TESTTMP/repo/subrepo not found (glob)
136 3: repository $TESTTMP/repo/subrepo not found (glob)
137 4: repository $TESTTMP/repo/subrepo not found (glob)
138 $ ls
139 b
140 $ mv b subrepo
141
124 142 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now