##// END OF EJS Templates
subrepo: extract preprocess of repo.commit() to free function...
Yuya Nishihara -
r35018:5c6b96b8 stable
parent child Browse files
Show More
@@ -1,2346 +1,2296
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 )
24 24 from . import (
25 25 bookmarks,
26 26 branchmap,
27 27 bundle2,
28 28 changegroup,
29 29 changelog,
30 30 color,
31 31 context,
32 32 dirstate,
33 33 dirstateguard,
34 34 discovery,
35 35 encoding,
36 36 error,
37 37 exchange,
38 38 extensions,
39 39 filelog,
40 40 hook,
41 41 lock as lockmod,
42 42 manifest,
43 43 match as matchmod,
44 44 merge as mergemod,
45 45 mergeutil,
46 46 namespaces,
47 47 obsolete,
48 48 pathutil,
49 49 peer,
50 50 phases,
51 51 pushkey,
52 52 pycompat,
53 53 repository,
54 54 repoview,
55 55 revset,
56 56 revsetlang,
57 57 scmutil,
58 58 sparse,
59 59 store,
60 60 subrepo,
61 61 tags as tagsmod,
62 62 transaction,
63 63 txnutil,
64 64 util,
65 65 vfs as vfsmod,
66 66 )
67 67
68 68 release = lockmod.release
69 69 urlerr = util.urlerr
70 70 urlreq = util.urlreq
71 71
72 72 # set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
74 74 # - '' for svfs relative paths
75 75 _cachedfiles = set()
76 76
class _basefilecache(scmutil.filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        # Accessed on the class (no instance): return the descriptor
        # itself, mirroring how ``property`` behaves.
        if repo is None:
            return self
        # Delegate to the unfiltered repo so every filtered view shares a
        # single cached value per tracked file.
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        # Writes also target the unfiltered repo for the same reason.
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        # Invalidation likewise happens on the unfiltered repo.
        return super(_basefilecache, self).__delete__(repo.unfiltered())
88 88
class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        # register each path as vfs-relative ('plain') in the module-level
        # registry of filecache-tracked files
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        # resolve fname relative to the repository's .hg directory (vfs)
        return obj.vfs.join(fname)
98 98
class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        # register each path as svfs-relative ('') in the module-level
        # registry of filecache-tracked files
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        # resolve fname relative to the store (.hg/store) directory
        return obj.sjoin(fname)
108 108
def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns (cachedobj-or-None, iscached) tuple.
    """
    # filecache state always lives on the unfiltered repo
    entry = repo.unfiltered()._filecache.get(name)
    if entry:
        return entry.obj, True
    return None, False
118 118
class unfilteredpropertycache(util.propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        # Compute and cache the value only on the unfiltered repo; a
        # filtered view simply reads the attribute stored there.
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)
127 127
class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # store the computed value directly on the (possibly filtered)
        # instance, bypassing any overridden __setattr__
        object.__setattr__(obj, self.name, value)
133 133
134 134
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    unfi = repo.unfiltered()
    # the cached value, if any, lives in the unfiltered repo's __dict__
    return name in vars(unfi)
138 138
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        # swap the (possibly filtered) receiver for its unfiltered view
        unfi = repo.unfiltered()
        return orig(unfi, *args, **kwargs)
    return wrapper
144 144
# capabilities advertised by the most recent local peer API
moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
# legacy peers additionally understand 'changegroupsubset'
legacycaps = moderncaps.union({'changegroupsubset'})
148 148
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        # expose the 'served' filtered view of the repository, never the
        # raw unfiltered repo
        self._repo = repo.filtered('served')
        self._ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    @util.propertycache
    def ui(self):
        return self._ui

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        """Return an unbundler for the requested bundle data."""
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        # stream clones only make sense over the wire; refuse locally
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            # translate a push race into the error the wire protocol expects
            raise error.ResponseError(_('push failed:'), str(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def iterbatch(self):
        return peer.localiterbatcher(self)

    # End of peer interface.
273 273
class locallegacypeer(repository.legacypeer, localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        # advertise the legacy capability set (includes changegroupsubset)
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, basenodes, source):
        # everything from basenodes up to all current heads
        outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        # only the revisions between the given bases and heads
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.
300 300
301 301 # Increment the sub-version when the revlog v2 format changes to lock out old
302 302 # clients.
303 303 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
304 304
class localrepository(object):

    # requirements that influence the repository's on-disk format; a
    # repository using any of these needs a client that knows the feature
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        'manifestv2',
        REVLOGV2_REQUIREMENT,
    }
    # the full set of requirements this client understands
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
    }
    # requirements forwarded to the store vfs as opener options
    # (see _applyopenerreqs)
    openerreqs = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        'manifestv2',
    }

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    # list of prefix for file which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assume
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't be dirstate covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }
352 352
    def __init__(self, baseui, path, create=False):
        """Open (or, with create=True, initialize) the repository at path.

        baseui is copied so repository-level configuration does not leak
        back into the caller's ui. Raises error.RepoError when path is not
        a repository, or already is one while creating.
        """
        self.requirements = set()
        self.filtername = None
        # wvfs: rooted at the repository root, used to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs: rooted at .hg, used to access repo files outside of .hg/store
        self.vfs = None
        # svfs: usually rooted at .hg/store, used to access repository history
        # If this is a shared repository, this vfs may point to another
        # repository's .hg/store directory.
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        # These auditor are not used by the vfs,
        # only used when writing this comment: basectx.match
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False, cached=True)
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
        # wrap vfs.audit with a lock-checking ward when devel warnings are on
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            # a missing .hg/hgrc is fine
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            # only run setup functions belonging to enabled extensions
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                # a repo without a requires file is acceptable
                if inst.errno != errno.ENOENT:
                    raise

        cachepath = self.vfs.join('cache')
        self.sharedpath = self.path
        try:
            # shared repos point their store at another repository's .hg
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            cachepath = vfs.join('cache')
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        if 'exp-sparse' in self.requirements and not sparse.enabled:
            raise error.RepoError(_('repository is using sparse feature but '
                                    'sparse is not enabled; enable the '
                                    '"sparse" extensions to access'))

        self.store = store.store(
            self.requirements, self.sharedpath,
            lambda base: vfsmod.vfs(base, cacheaudited=True))
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
        self.cachevfs.createmode = self.store.createmode
        # same devel ward treatment for the store vfs
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # Cache of types representing filtered repos.
        self._filteredrepotypes = weakref.WeakKeyDictionary()

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}
512 512
    def _getvfsward(self, origfunc):
        """build a ward for self.vfs

        The returned wrapper around ``origfunc`` (the vfs audit hook) emits
        devel warnings when .hg files are written without holding the
        appropriate lock.
        """
        # weakref so the ward does not keep the repo alive
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            # bail out if the repo is gone or not fully initialized yet
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            # read accesses need no lock
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs
547 547
    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs

        Like _getvfsward, but store files are all covered by the plain
        'lock', so only that lock is checked.
        """
        # weakref so the ward does not keep the repo alive
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            # bail out if the repo is gone or not fully initialized yet
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            # read accesses need no lock
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs
566 566
    def close(self):
        # flush write-back caches before the repo object goes away
        self._writecaches()
569 569
    def _loadextensions(self):
        # load every extension enabled by this repository's configuration
        extensions.loadall(self.ui)
572 572
573 573 def _writecaches(self):
574 574 if self._revbranchcache:
575 575 self._revbranchcache.write()
576 576
577 577 def _restrictcapabilities(self, caps):
578 578 if self.ui.configbool('experimental', 'bundle2-advertise'):
579 579 caps = set(caps)
580 580 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
581 581 caps.add('bundle2=' + urlreq.quote(capsblob))
582 582 return caps
583 583
    def _applyopenerreqs(self):
        """Translate requirements and config into self.svfs.options."""
        # start from the requirements that double as opener options
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas')
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
        # negative values disable the option entirely
        if 0 <= chainspan:
            self.svfs.options['maxdeltachainspan'] = chainspan
        mmapindexthreshold = self.ui.configbytes('experimental',
                                                 'mmapindexthreshold')
        if mmapindexthreshold is not None:
            self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
        withsparseread = self.ui.configbool('experimental', 'sparse-read')
        srdensitythres = float(self.ui.config('experimental',
                                              'sparse-read.density-threshold'))
        srmingapsize = self.ui.configbytes('experimental',
                                           'sparse-read.min-gap-size')
        self.svfs.options['with-sparse-read'] = withsparseread
        self.svfs.options['sparse-read-density-threshold'] = srdensitythres
        self.svfs.options['sparse-read-min-gap-size'] = srmingapsize

        # pick the compression engine named by an exp-compression-* requirement
        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True
627 627
    def _writerequirements(self):
        # persist self.requirements into .hg/requires
        scmutil.writerequires(self.vfs, self.requirements)
630 630
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        # walk prefixes from longest to shortest, looking for a subrepo
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    # the path itself is a subrepository
                    return True
                else:
                    # path is inside a subrepo; let that subrepo decide
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
668 668
    def peer(self):
        """Return a local peer view of this repository."""
        return localpeer(self) # not cached to avoid reference cycle
671 671
    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        # the base class is already unfiltered, so this is the identity
        return self
677 677
678 678 def filtered(self, name):
679 679 """Return a filtered version of a repository"""
680 680 # Python <3.4 easily leaks types via __mro__. See
681 681 # https://bugs.python.org/issue17950. We cache dynamically
682 682 # created types so this method doesn't leak on every
683 683 # invocation.
684 684
685 685 key = self.unfiltered().__class__
686 686 if key not in self._filteredrepotypes:
687 687 # Build a new type with the repoview mixin and the base
688 688 # class of this repo. Give it a name containing the
689 689 # filter name to aid debugging.
690 690 bases = (repoview.repoview, key)
691 691 cls = type(r'%sfilteredrepo' % name, bases, {})
692 692 self._filteredrepotypes[key] = cls
693 693
694 694 return self._filteredrepotypes[key](self, name)
695 695
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        # bookmark store; invalidated when either bookmark file changes
        return bookmarks.bmstore(self)
699 699
    @property
    def _activebookmark(self):
        # the currently active bookmark, delegated to the bookmark store
        return self._bookmarks.active
703 703
    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        # phase data, seeded with the extension-provided _phasedefaults
        return phases.phasecache(self, self._phasedefaults)
710 710
    @storecache('obsstore')
    def obsstore(self):
        # obsolescence marker store, reloaded when .hg/store/obsstore changes
        return obsolete.makestore(self.ui, self)
714 714
    @storecache('00changelog.i')
    def changelog(self):
        # trypending lets readers see changesets from an in-progress
        # transaction when one may exist (see txnutil.mayhavepending)
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))
719 719
    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)
725 725
    @storecache('00manifest.i')
    def manifestlog(self):
        # manifest access point, invalidated when 00manifest.i changes
        return manifest.manifestlog(self.svfs, self)
729 729
    @repofilecache('dirstate')
    def dirstate(self):
        # resolved lazily: the sparse matcher is looked up per use
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)
736 736
737 737 def _dirstatevalidate(self, node):
738 738 try:
739 739 self.changelog.rev(node)
740 740 return node
741 741 except error.LookupError:
742 742 if not self._dirstatevalidatewarned:
743 743 self._dirstatevalidatewarned = True
744 744 self.ui.warn(_("warning: ignoring unknown"
745 745 " working parent %s!\n") % short(node))
746 746 return nullid
747 747
    def __getitem__(self, changeid):
        """Return the context for changeid.

        None yields the working context; a slice yields a list of
        changectx objects for the (unfiltered-revision) range.
        """
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            # changeid named the working directory; hand back its context
            return context.workingctx(self)
760 760
761 761 def __contains__(self, changeid):
762 762 """True if the given changeid exists
763 763
764 764 error.LookupError is raised if an ambiguous node specified.
765 765 """
766 766 try:
767 767 self[changeid]
768 768 return True
769 769 except error.RepoLookupError:
770 770 return False
771 771
    def __nonzero__(self):
        # a repository object is always truthy, even when empty
        return True

    __bool__ = __nonzero__
776 776
    def __len__(self):
        # number of revisions, as recorded by the changelog
        return len(self.changelog)
779 779
    def __iter__(self):
        # iterate over revision numbers, delegating to the changelog
        return iter(self.changelog)
782 782
783 783 def revs(self, expr, *args):
784 784 '''Find revisions matching a revset.
785 785
786 786 The revset is specified as a string ``expr`` that may contain
787 787 %-formatting to escape certain types. See ``revsetlang.formatspec``.
788 788
789 789 Revset aliases from the configuration are not expanded. To expand
790 790 user aliases, consider calling ``scmutil.revrange()`` or
791 791 ``repo.anyrevs([expr], user=True)``.
792 792
793 793 Returns a revset.abstractsmartset, which is a list-like interface
794 794 that contains integer revisions.
795 795 '''
796 796 expr = revsetlang.formatspec(expr, *args)
797 797 m = revset.match(None, expr)
798 798 return m(self)
799 799
800 800 def set(self, expr, *args):
801 801 '''Find revisions matching a revset and emit changectx instances.
802 802
803 803 This is a convenience wrapper around ``revs()`` that iterates the
804 804 result and is a generator of changectx instances.
805 805
806 806 Revset aliases from the configuration are not expanded. To expand
807 807 user aliases, consider calling ``scmutil.revrange()``.
808 808 '''
809 809 for r in self.revs(expr, *args):
810 810 yield self[r]
811 811
812 812 def anyrevs(self, specs, user=False, localalias=None):
813 813 '''Find revisions matching one of the given revsets.
814 814
815 815 Revset aliases from the configuration are not expanded by default. To
816 816 expand user aliases, specify ``user=True``. To provide some local
817 817 definitions overriding user aliases, set ``localalias`` to
818 818 ``{name: definitionstring}``.
819 819 '''
820 820 if user:
821 821 m = revset.matchany(self.ui, specs, repo=self,
822 822 localalias=localalias)
823 823 else:
824 824 m = revset.matchany(None, specs, localalias=localalias)
825 825 return m(self)
826 826
    def url(self):
        """Return this repository's URL: 'file:' + the repo root."""
        return 'file:' + self.root
829 829
    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.

        ``name`` is the hook name; ``throw`` controls whether a failing
        hook raises; keyword args are forwarded to hook.hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
838 838
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # derived caches, filled lazily by nodetags()/tagslist()
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
861 861
862 862 def tags(self):
863 863 '''return a mapping of tag to node'''
864 864 t = {}
865 865 if self.changelog.filteredrevs:
866 866 tags, tt = self._findtags()
867 867 else:
868 868 tags = self._tagscache.tags
869 869 for k, v in tags.iteritems():
870 870 try:
871 871 # ignore tags to unknown nodes
872 872 self.changelog.rev(v)
873 873 t[k] = v
874 874 except (error.LookupError, ValueError):
875 875 pass
876 876 return t
877 877
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?


        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        # local tags override/extend the global ones in place
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
912 912
913 913 def tagtype(self, tagname):
914 914 '''
915 915 return the type of the given tag. result can be:
916 916
917 917 'local' : a local tag
918 918 'global' : a global tag
919 919 None : tag does not exist
920 920 '''
921 921
922 922 return self._tagscache.tagtypes.get(tagname)
923 923
924 924 def tagslist(self):
925 925 '''return a list of tags ordered by revision'''
926 926 if not self._tagscache.tagslist:
927 927 l = []
928 928 for t, n in self.tags().iteritems():
929 929 l.append((self.changelog.rev(n), t, n))
930 930 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
931 931
932 932 return self._tagscache.tagslist
933 933
934 934 def nodetags(self, node):
935 935 '''return the tags associated with a node'''
936 936 if not self._tagscache.nodetagscache:
937 937 nodetagscache = {}
938 938 for t, n in self._tagscache.tags.iteritems():
939 939 nodetagscache.setdefault(n, []).append(t)
940 940 for tags in nodetagscache.itervalues():
941 941 tags.sort()
942 942 self._tagscache.nodetagscache = nodetagscache
943 943 return self._tagscache.nodetagscache.get(node, [])
944 944
945 945 def nodebookmarks(self, node):
946 946 """return the list of bookmarks pointing to the specified node"""
947 947 marks = []
948 948 for bookmark, n in self._bookmarks.iteritems():
949 949 if n == node:
950 950 marks.append(bookmark)
951 951 return sorted(marks)
952 952
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        # refresh (or build) the cache for this repo's current filter level
        # before returning that level's view of it
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]
958 958
959 959 @unfilteredmethod
960 960 def revbranchcache(self):
961 961 if not self._revbranchcache:
962 962 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
963 963 return self._revbranchcache
964 964
965 965 def branchtip(self, branch, ignoremissing=False):
966 966 '''return the tip node for a given branch
967 967
968 968 If ignoremissing is True, then this method will not raise an error.
969 969 This is helpful for callers that only expect None for a missing branch
970 970 (e.g. namespace).
971 971
972 972 '''
973 973 try:
974 974 return self.branchmap().branchtip(branch)
975 975 except KeyError:
976 976 if not ignoremissing:
977 977 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
978 978 else:
979 979 pass
980 980
981 981 def lookup(self, key):
982 982 return self[key].node()
983 983
984 984 def lookupbranch(self, key, remote=None):
985 985 repo = remote or self
986 986 if key in repo.branchmap():
987 987 return key
988 988
989 989 repo = (remote and remote.local()) and remote or self
990 990 return repo[key].branch()
991 991
992 992 def known(self, nodes):
993 993 cl = self.changelog
994 994 nm = cl.nodemap
995 995 filtered = cl.filteredrevs
996 996 result = []
997 997 for n in nodes:
998 998 r = nm.get(n)
999 999 resp = not (r is None or r in filtered)
1000 1000 result.append(resp)
1001 1001 return result
1002 1002
    def local(self):
        # peer API: a truthy return value means this repository is local;
        # we return the repository object itself so callers can use it
        return self
1005 1005
1006 1006 def publishing(self):
1007 1007 # it's safe (and desirable) to trust the publish flag unconditionally
1008 1008 # so that we don't finalize changes shared between users via ssh or nfs
1009 1009 return self.ui.configbool('phases', 'publish', untrusted=True)
1010 1010
1011 1011 def cancopy(self):
1012 1012 # so statichttprepo's override of local() works
1013 1013 if not self.local():
1014 1014 return False
1015 1015 if not self.publishing():
1016 1016 return True
1017 1017 # if publishing we can't copy if there is filtered content
1018 1018 return not self.filtered('visible').changelog.filteredrevs
1019 1019
1020 1020 def shared(self):
1021 1021 '''the type of shared repository (None if not shared)'''
1022 1022 if self.sharedpath != self.path:
1023 1023 return 'store'
1024 1024 return None
1025 1025
    def wjoin(self, f, *insidef):
        # join one or more path fragments onto the working directory root
        return self.vfs.reljoin(self.root, f, *insidef)
1028 1028
    def file(self, f):
        # Return the filelog for tracked file ``f``.  A leading '/' is
        # stripped so 'foo' and '/foo' address the same filelog.  Note:
        # ``f[0]`` means an empty path raises IndexError.
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)
1033 1033
    def changectx(self, changeid):
        # thin wrapper kept for API convenience; equivalent to self[changeid]
        return self[changeid]
1036 1036
    def setparents(self, p1, p2=nullid):
        """Set the working directory parents to ``p1`` and ``p2``.

        Copy records are adjusted here because the dirstate itself has no
        access to the parent manifests."""
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                # no second parent: drop copy records whose source and
                # destination are both absent from the first parent
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)
1052 1052
    def filectx(self, path, changeid=None, fileid=None):
        """Return a filectx for ``path``.

        changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
1057 1057
    def getcwd(self):
        # delegate to the dirstate, which tracks the working directory
        return self.dirstate.getcwd()
1060 1060
    def pathto(self, f, cwd=None):
        # delegate relative-path formatting of ``f`` to the dirstate
        return self.dirstate.pathto(f, cwd)
1063 1063
1064 1064 def _loadfilter(self, filter):
1065 1065 if filter not in self.filterpats:
1066 1066 l = []
1067 1067 for pat, cmd in self.ui.configitems(filter):
1068 1068 if cmd == '!':
1069 1069 continue
1070 1070 mf = matchmod.match(self.root, '', [pat])
1071 1071 fn = None
1072 1072 params = cmd
1073 1073 for name, filterfn in self._datafilters.iteritems():
1074 1074 if cmd.startswith(name):
1075 1075 fn = filterfn
1076 1076 params = cmd[len(name):].lstrip()
1077 1077 break
1078 1078 if not fn:
1079 1079 fn = lambda s, c, **kwargs: util.filter(s, c)
1080 1080 # Wrap old filters not supporting keyword arguments
1081 1081 if not inspect.getargspec(fn)[2]:
1082 1082 oldfn = fn
1083 1083 fn = lambda s, c, **kwargs: oldfn(s, c)
1084 1084 l.append((mf, fn, params))
1085 1085 self.filterpats[filter] = l
1086 1086 return self.filterpats[filter]
1087 1087
1088 1088 def _filter(self, filterpats, filename, data):
1089 1089 for mf, fn, cmd in filterpats:
1090 1090 if mf(filename):
1091 1091 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1092 1092 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1093 1093 break
1094 1094
1095 1095 return data
1096 1096
    @unfilteredpropertycache
    def _encodefilterpats(self):
        # cached [encode] filter specifications (see _loadfilter)
        return self._loadfilter('encode')
1100 1100
    @unfilteredpropertycache
    def _decodefilterpats(self):
        # cached [decode] filter specifications (see _loadfilter)
        return self._loadfilter('decode')
1104 1104
    def adddatafilter(self, name, filter):
        # register a named data filter; [encode]/[decode] commands starting
        # with ``name`` will dispatch to ``filter`` (see _loadfilter)
        self._datafilters[name] = filter
1107 1107
1108 1108 def wread(self, filename):
1109 1109 if self.wvfs.islink(filename):
1110 1110 data = self.wvfs.readlink(filename)
1111 1111 else:
1112 1112 data = self.wvfs.read(filename)
1113 1113 return self._filter(self._encodefilterpats, filename, data)
1114 1114
1115 1115 def wwrite(self, filename, data, flags, backgroundclose=False):
1116 1116 """write ``data`` into ``filename`` in the working directory
1117 1117
1118 1118 This returns length of written (maybe decoded) data.
1119 1119 """
1120 1120 data = self._filter(self._decodefilterpats, filename, data)
1121 1121 if 'l' in flags:
1122 1122 self.wvfs.symlink(data, filename)
1123 1123 else:
1124 1124 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
1125 1125 if 'x' in flags:
1126 1126 self.wvfs.setflags(filename, False, True)
1127 1127 return len(data)
1128 1128
    def wwritedata(self, filename, data):
        # run ``data`` through the decode filters without writing anything
        return self._filter(self._decodefilterpats, filename, data)
1131 1131
1132 1132 def currenttransaction(self):
1133 1133 """return the current transaction or None if non exists"""
1134 1134 if self._transref:
1135 1135 tr = self._transref()
1136 1136 else:
1137 1137 tr = None
1138 1138
1139 1139 if tr and tr.running():
1140 1140 return tr
1141 1141 return None
1142 1142
    def transaction(self, desc, report=None):
        """Open and return a new transaction named ``desc``.

        If a transaction is already running, return a nested handle on it
        instead.  ``report`` overrides the function used to print
        transaction messages (defaults to ``ui.warn``).
        """
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            scmutil.registersummarycallback(self, tr, desc)
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        # derive a reasonably unique transaction id from time and randomness
        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movement from a code perspective. So we fallback to a
        # tracking at the repository level. One could envision to track changes
        # to the '.hgtags' file through changegroup apply but that fails to
        # cope with case where transaction expose new heads without changegroup
        # being involved (eg: phase movement).
        #
        # For now, We gate the feature behind a flag since this likely comes
        # with performance impacts. The current code run more often than needed
        # and do not use caches as much as it could. The current focus is on
        # the behavior of the feature so we disable it by default. The flag
        # will be removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tag were touched
        # in this one. Changes are recorded in a line base format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follow:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once buiding set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we needs it to still exist on the transaction
                        # is close (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hooks since the 'tr.hookargs'
            # dict is copied before these run. In addition we needs the data
            # available to in memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles)
        tr.changes['revs'] = set()
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize mean that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happen during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            # defer the actual hook run until all locks are released
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr
1345 1345
    def _journalfiles(self):
        # every (vfs, name) pair making up a transaction journal; the same
        # list, renamed via undoname(), backs undofiles()/rollback
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))
1353 1353
    def undofiles(self):
        # the undo.* counterparts of the journal files above
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1356 1356
    @unfilteredmethod
    def _writejournal(self, desc):
        """Snapshot dirstate, branch, description, bookmarks and phaseroots
        into journal.* files so an interrupted transaction can be undone."""
        self.dirstate.savebackup(None, 'journal.dirstate')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        # journal.desc records the pre-transaction length and description
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))
1368 1368
    def recover(self):
        """Roll back an interrupted transaction, if one exists.

        Returns True when a journal was found and rolled back, False
        otherwise."""
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
1383 1383
    def rollback(self, dryrun=False, force=False):
        """Undo the last transaction.

        Returns 0 on success and 1 when there is no rollback information
        available."""
        wlock = lock = dsguard = None
        try:
            # wlock must always be acquired before lock
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                # guard the dirstate so a failed rollback restores it
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)
1398 1398
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        """Do the actual work of rolling back the last transaction.

        ``dsguard`` protects the dirstate; it is closed early when the
        rollback removed the working directory parents from history, so
        that the dirstate restored from undo.dirstate is not clobbered.
        Returns 0."""
        ui = self.ui
        try:
            # undo.desc stores the pre-transaction repo length and the
            # transaction description (see _writejournal)
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        # if either dirstate parent vanished from history, the working
        # directory state must be restored from the journal as well
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
1470 1470
1471 1471 def _buildcacheupdater(self, newtransaction):
1472 1472 """called during transaction to build the callback updating cache
1473 1473
1474 1474 Lives on the repository to help extension who might want to augment
1475 1475 this logic. For this purpose, the created transaction is passed to the
1476 1476 method.
1477 1477 """
1478 1478 # we must avoid cyclic reference between repo and transaction.
1479 1479 reporef = weakref.ref(self)
1480 1480 def updater(tr):
1481 1481 repo = reporef()
1482 1482 repo.updatecaches(tr)
1483 1483 return updater
1484 1484
    @unfilteredmethod
    def updatecaches(self, tr=None):
        """warm appropriate caches

        If this function is called after a transaction closed. The transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        # no transaction (explicit warming) or new revisions were added
        if tr is None or tr.changes['revs']:
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))
1502 1502
1503 1503 def invalidatecaches(self):
1504 1504
1505 1505 if '_tagscache' in vars(self):
1506 1506 # can't use delattr on proxy
1507 1507 del self.__dict__['_tagscache']
1508 1508
1509 1509 self.unfiltered()._branchcaches.clear()
1510 1510 self.invalidatevolatilesets()
1511 1511 self._sparsesignaturecache.clear()
1512 1512
    def invalidatevolatilesets(self):
        # drop caches that depend on the set of filtered/obsolete revisions
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
1516 1516
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different to dirstate.invalidate() that it doesn't always
        rereads the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            # clear the per-file caches held by the dirstate object itself
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            # drop the cached dirstate attribute so it is rebuilt on next use
            delattr(self.unfiltered(), 'dirstate')
1533 1533
    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                # the attribute may not have been instantiated yet
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()
1567 1567
    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        # dirstate is handled separately from the other file caches
        self.invalidatedirstate()
1574 1574
1575 1575 @unfilteredmethod
1576 1576 def _refreshfilecachestats(self, tr):
1577 1577 """Reload stats of cached files so that they are flagged as valid"""
1578 1578 for k, ce in self._filecache.items():
1579 1579 if k == 'dirstate' or k not in self.__dict__:
1580 1580 continue
1581 1581 ce.refresh()
1582 1582
    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        """Acquire ``lockname`` in ``vfs`` and return the lock object.

        A non-blocking acquisition is attempted first; if the lock is held
        by someone else and ``wait`` is true, a message is printed and the
        acquisition is retried with the ui.timeout configuration."""
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)
        try:
            # timeout of 0: fail immediately if the lock is held
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l
1614 1614
1615 1615 def _afterlock(self, callback):
1616 1616 """add a callback to be run when the repository is fully unlocked
1617 1617
1618 1618 The callback will be executed when the outermost lock is released
1619 1619 (with wlock being higher level than 'lock')."""
1620 1620 for ref in (self._wlockref, self._lockref):
1621 1621 l = ref and ref()
1622 1622 if l and l.held:
1623 1623 l.postrelease.append(callback)
1624 1624 break
1625 1625 else: # no lock have been found.
1626 1626 callback()
1627 1627
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)

        If both 'lock' and 'wlock' must be acquired, ensure you always acquires
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            # a lock is already held: bump its reference count and reuse it
            l.lock()
            return l

        # on acquisition, in-memory caches are invalidated so stale state
        # from before the lock was held is discarded
        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
1644 1644
1645 1645 def _wlockchecktransaction(self):
1646 1646 if self.currenttransaction() is not None:
1647 1647 raise error.LockInheritanceContractViolation(
1648 1648 'wlock cannot be inherited in the middle of a transaction')
1649 1649
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquires
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # a wlock is already held: bump its reference count and reuse it
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            # on release, write out pending dirstate changes, unless a
            # parent change is mid-flight (then pending state is discarded)
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l
1685 1685
1686 1686 def _currentlock(self, lockref):
1687 1687 """Returns the lock if it's held, or None if it's not."""
1688 1688 if lockref is None:
1689 1689 return None
1690 1690 l = lockref()
1691 1691 if l is None or not l.held:
1692 1692 return None
1693 1693 return l
1694 1694
def currentwlock(self):
    """Return the currently held wlock, or None when it is not held."""
    return self._currentlock(self._wlockref)
def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
    """
    commit an individual file as part of a larger transaction

    fctx: the file context carrying the new content
    manifest1, manifest2: manifests of the two commit parents
    linkrev: changelog revision the new filelog entry will link to
    tr: the running transaction
    changelist: list of changed file names; appended to in place when
    this file turns out to have changed

    Returns the filelog node to store in the manifest (either a reused
    existing node or a freshly added one).
    """

    fname = fctx.path()
    fparent1 = manifest1.get(fname, nullid)
    fparent2 = manifest2.get(fname, nullid)
    if isinstance(fctx, context.filectx):
        # the context already points at a stored filelog revision; reuse
        # it when it matches one of the parents
        node = fctx.filenode()
        if node in [fparent1, fparent2]:
            self.ui.debug('reusing %s filelog entry\n' % fname)
            if manifest1.flags(fname) != fctx.flags():
                changelist.append(fname)
            return node

    flog = self.file(fname)
    meta = {}
    copy = fctx.renamed()
    if copy and copy[0] != fname:
        # Mark the new revision of this file as a copy of another
        # file. This copy data will effectively act as a parent
        # of this new revision. If this is a merge, the first
        # parent will be the nullid (meaning "look up the copy data")
        # and the second one will be the other parent. For example:
        #
        # 0 --- 1 --- 3 rev1 changes file foo
        # \ / rev2 renames foo to bar and changes it
        # \- 2 -/ rev3 should have bar with all changes and
        # should record that bar descends from
        # bar in rev2 and foo in rev1
        #
        # this allows this merge to succeed:
        #
        # 0 --- 1 --- 3 rev4 reverts the content change from rev2
        # \ / merging rev3 and rev4 should use bar@rev2
        # \- 2 --- 4 as the merge base
        #

        cfname = copy[0]
        crev = manifest1.get(cfname)
        newfparent = fparent2

        if manifest2: # branch merge
            if fparent2 == nullid or crev is None: # copied on remote side
                if cfname in manifest2:
                    crev = manifest2[cfname]
                    newfparent = fparent1

        # Here, we used to search backwards through history to try to find
        # where the file copy came from if the source of a copy was not in
        # the parent directory. However, this doesn't actually make sense to
        # do (what does a copy from something not in your working copy even
        # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
        # the user that copy information was dropped, so if they didn't
        # expect this outcome it can be fixed, but this is the correct
        # behavior in this circumstance.

        if crev:
            self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        else:
            self.ui.warn(_("warning: can't find ancestor for '%s' "
                           "copied from '%s'!\n") % (fname, cfname))

    elif fparent1 == nullid:
        fparent1, fparent2 = fparent2, nullid
    elif fparent2 != nullid:
        # is one parent an ancestor of the other?
        fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
        if fparent1 in fparentancestors:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 in fparentancestors:
            fparent2 = nullid

    # is the file changed?
    text = fctx.data()
    if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
        changelist.append(fname)
        return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
    # are just the flags changed during merge?
    elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
        changelist.append(fname)

    return fparent1
def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
    """check for commit arguments that aren't committable

    Calls fail(filename, message) for each explicitly named file or
    pattern that cannot be part of the commit."""
    if not (match.isexact() or match.prefix()):
        return
    matched = set(status.modified + status.added + status.removed)

    for f in match.files():
        f = self.dirstate.normalize(f)
        if f == '.' or f in matched or f in wctx.substate:
            continue
        if f in status.deleted:
            fail(f, _('file not found!'))
        if f in vdirs:  # visited directory
            prefix = f + '/'
            if not any(mf.startswith(prefix) for mf in matched):
                fail(f, _("no match under directory!"))
        elif f not in self.dirstate:
            fail(f, _("file not tracked!"))
@unfilteredmethod
def commit(self, text="", user=None, date=None, match=None, force=False,
           editor=False, extra=None):
    """Add a new revision to current repository.

    Revision information is gathered from the working directory,
    match can be used to filter the committed files. If editor is
    supplied, it is called to get a commit message.

    Returns the new changelog node, or None when there was nothing to
    commit (and empty commits are not allowed).
    """
    if extra is None:
        extra = {}

    def fail(f, msg):
        raise error.Abort('%s: %s' % (f, msg))

    if not match:
        match = matchmod.always(self.root, '')

    if not force:
        vdirs = []
        match.explicitdir = vdirs.append
        match.bad = fail

    wlock = lock = tr = None
    try:
        wlock = self.wlock()
        lock = self.lock() # for recent changelog (see issue4368)

        wctx = self[None]
        merge = len(wctx.parents()) > 1

        if not force and merge and not match.always():
            raise error.Abort(_('cannot partially commit a merge '
                               '(do not specify files or patterns)'))

        status = self.status(match=match, clean=force)
        if force:
            status.modified.extend(status.clean) # mq may commit clean files

        # check subrepos
        subs, commitsubs, newstate = subrepo.precommit(
            self.ui, wctx, status, match, force=force)

        # make sure all explicit patterns are matched
        if not force:
            self.checkcommitpatterns(wctx, vdirs, match, status, fail)

        cctx = context.workingcommitctx(self, status,
                                        text, user, date, extra)

        # internal config: ui.allowemptycommit
        allowemptycommit = (wctx.branch() != wctx.p1().branch()
                            or extra.get('close') or merge or cctx.files()
                            or self.ui.configbool('ui', 'allowemptycommit'))
        if not allowemptycommit:
            return None

        if merge and cctx.deleted():
            raise error.Abort(_("cannot commit merge with missing files"))

        ms = mergemod.mergestate.read(self)
        mergeutil.checkunresolved(ms)

        if editor:
            cctx._text = editor(self, cctx, subs)
        edited = (text != cctx._text)

        # Save commit message in case this transaction gets rolled back
        # (e.g. by a pretxncommit hook). Leave the content alone on
        # the assumption that the user will use the same editor again.
        msgfn = self.savecommitmessage(cctx._text)

        # commit subs and write new state
        if subs:
            for s in sorted(commitsubs):
                sub = wctx.sub(s)
                self.ui.status(_('committing subrepository %s\n') %
                               subrepo.subrelpath(sub))
                sr = sub.commit(cctx._text, user, date)
                newstate[s] = (newstate[s][0], sr)
            subrepo.writestate(self, newstate)

        p1, p2 = self.dirstate.parents()
        hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
        try:
            self.hook("precommit", throw=True, parent1=hookp1,
                      parent2=hookp2)
            tr = self.transaction('commit')
            ret = self.commitctx(cctx, True)
        except: # re-raises
            if edited:
                # point the user at the saved message before propagating
                self.ui.write(
                    _('note: commit message saved in %s\n') % msgfn)
            raise
        # update bookmarks, dirstate and mergestate
        bookmarks.update(self, [p1, p2], ret)
        cctx.markcommitted(ret)
        ms.reset()
        tr.close()

    finally:
        lockmod.release(tr, lock, wlock)

    def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
        # hack for command that use a temporary commit (eg: histedit)
        # temporary commit got stripped before hook release
        if self.changelog.hasnode(ret):
            self.hook("commit", node=node, parent1=parent1,
                      parent2=parent2)
    self._afterlock(commithook)
    return ret
@unfilteredmethod
def commitctx(self, ctx, error=False):
    """Add a new revision to current repository.
    Revision information is passed via the context argument.

    When *error* is true, a warning is printed for every IOError hit
    while committing a file; otherwise only non-ENOENT IOErrors are
    warned about.  The exception is re-raised in all cases.

    Returns the new changelog node.
    """

    tr = None
    p1, p2 = ctx.p1(), ctx.p2()
    user = ctx.user()

    lock = self.lock()
    try:
        tr = self.transaction("commit")
        trp = weakref.proxy(tr)

        if ctx.manifestnode():
            # reuse an existing manifest revision
            mn = ctx.manifestnode()
            files = ctx.files()
        elif ctx.files():
            m1ctx = p1.manifestctx()
            m2ctx = p2.manifestctx()
            mctx = m1ctx.copy()

            m = mctx.read()
            m1 = m1ctx.read()
            m2 = m2ctx.read()

            # check in files
            added = []
            changed = []
            removed = list(ctx.removed())
            linkrev = len(self)
            self.ui.note(_("committing files:\n"))
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    if fctx is None:
                        removed.append(f)
                    else:
                        added.append(f)
                        m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                trp, changed)
                        m.setflag(f, fctx.flags())
                except OSError as inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError as inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                    raise

            # update manifest
            self.ui.note(_("committing manifest\n"))
            # only record removals of files present in a parent
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m]
            for f in drop:
                del m[f]
            mn = mctx.write(trp, linkrev,
                            p1.manifestnode(), p2.manifestnode(),
                            added, drop)
            files = changed + removed
        else:
            # no file changes: reuse the first parent's manifest
            mn = p1.manifestnode()
            files = []

        # update changelog
        self.ui.note(_("committing changelog\n"))
        self.changelog.delayupdate(tr)
        n = self.changelog.add(mn, files, ctx.description(),
                               trp, p1.node(), p2.node(),
                               user, ctx.date(), ctx.extra().copy())
        xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        # set the new commit is proper phase
        targetphase = subrepo.newcommitphase(self.ui, ctx)
        if targetphase:
            # retract boundary do not alter parent changeset.
            # if a parent have higher the resulting phase will
            # be compliant anyway
            #
            # if minimal phase was 0 we don't need to retract anything
            phases.registernew(self, tr, targetphase, [n])
        tr.close()
        return n
    finally:
        if tr:
            tr.release()
        lock.release()
@unfilteredmethod
def destroying(self):
    '''Notify the repository that nodes are about to be destroyed.

    Strip and rollback call this before doing their damage, giving one
    common place for anything that must happen first.

    This is mostly useful for saving state that is in memory and waiting
    to be flushed when the current lock is released. Because a call to
    destroyed is imminent, the repo will be invalidated causing those
    changes to stay in memory (waiting for the next unlock), or vanish
    completely.
    '''
    # Committing under the same lock leaves the phasecache dirty; the
    # upcoming strip invalidates the repo, which would silently discard
    # those changes.  Flush the cache now if it was instantiated.
    if '_phasecache' in vars(self):
        self._phasecache.write()
@unfilteredmethod
def destroyed(self):
    '''Inform the repository that nodes have been destroyed.
    Intended for use by strip and rollback, so there's a common
    place for anything that has to be done after destroying history.
    '''
    # When one tries to:
    # 1) destroy nodes thus calling this method (e.g. strip)
    # 2) use phasecache somewhere (e.g. commit)
    #
    # then 2) will fail because the phasecache contains nodes that were
    # removed. We can either remove phasecache from the filecache,
    # causing it to reload next time it is accessed, or simply filter
    # the removed nodes now and write the updated cache.
    self._phasecache.filterunknown(self)
    self._phasecache.write()

    # refresh all repository caches
    self.updatecaches()

    # Ensure the persistent tag cache is updated. Doing it now
    # means that the tag cache only has to worry about destroyed
    # heads immediately after a strip/rollback. That in turn
    # guarantees that "cachetip == currenttip" (comparing both rev
    # and node) always means no nodes have been added or destroyed.

    # XXX this is suboptimal when qrefresh'ing: we strip the current
    # head, refresh the tag cache, then immediately add a new head.
    # But I think doing it this way is necessary for the "instant
    # tag cache retrieval" case to work.
    self.invalidate()
def walk(self, match, node=None):
    '''Deprecated: walk the directory tree or the given changeset,
    yielding all files matched by *match*.  Use repo[node].walk
    instead.'''
    self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
    ctx = self[node]
    return ctx.walk(match)
def status(self, node1='.', node2=None, match=None,
           ignored=False, clean=False, unknown=False,
           listsubrepos=False):
    '''a convenience method that delegates to node1.status(node2)'''
    ctx = self[node1]
    return ctx.status(node2, match, ignored, clean, unknown,
                      listsubrepos)
def addpostdsstatus(self, ps):
    """Register a callback to run within the wlock, at the point at
    which status fixups happen.

    On status completion, callback(wctx, status) is invoked with the
    wlock held, unless the dirstate changed underneath or the wlock
    could not be grabbed.

    Callbacks must not capture a cached copy of the dirstate -- it may
    change in the meanwhile; access it through wctx.repo().dirstate
    instead.

    The list is emptied after each status run, so extensions should add
    to it every time dirstate.status is called, and never for statuses
    that don't involve the dirstate.
    """
    # The list lives here for uniqueness reasons -- it is actually
    # managed by the workingctx, but that isn't unique per-repo.
    self._postdsstatus.append(ps)
def postdsstatus(self):
    """Return the registered post-dirstate-status hooks (workingctx
    consumes these)."""
    return self._postdsstatus
def clearpostdsstatus(self):
    """Drop all registered post-dirstate-status hooks (used by
    workingctx); the list object itself is kept, only emptied."""
    del self._postdsstatus[:]
def heads(self, start=None):
    """Return all repository head nodes, newest first; with *start*,
    only heads reachable from it."""
    if start is None:
        cl = self.changelog
        # headrevs() yields revisions lowest-first; callers expect the
        # opposite order
        return [cl.node(rev) for rev in reversed(cl.headrevs())]

    found = self.changelog.heads(start)
    # sort the output in rev descending order
    return sorted(found, key=self.changelog.rev, reverse=True)
def branchheads(self, branch=None, start=None, closed=False):
    '''return a (possibly filtered) list of heads for the given branch

    Heads are returned in topological order, from newest to oldest.
    If branch is None, use the dirstate branch.
    If start is not None, return only heads reachable from start.
    If closed is True, return heads that are marked as closed as well.
    '''
    if branch is None:
        branch = self[None].branch()
    branches = self.branchmap()
    if branch not in branches:
        return []
    # the cache returns heads ordered lowest to highest
    bheads = list(reversed(branches.branchheads(branch, closed=closed)))
    if start is not None:
        # filter out the heads that cannot be reached from startrev
        fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
        bheads = [h for h in bheads if h in fbheads]
    return bheads
def branches(self, nodes):
    """For each node (default: the tip), follow first parents until a
    merge or root is hit; return (start, end, p1, p2) tuples."""
    if not nodes:
        nodes = [self.changelog.tip()]

    result = []
    for tip in nodes:
        cur = tip
        while True:
            p1, p2 = self.changelog.parents(cur)
            if p2 != nullid or p1 == nullid:
                # merge changeset or root reached: this branch ends here
                result.append((tip, cur, p1, p2))
                break
            cur = p1
    return result
def between(self, pairs):
    """For each (top, bottom) pair, walk the first-parent chain from
    top towards bottom and sample the nodes seen at exponentially
    growing distances (1, 2, 4, ... steps from top).

    Returns one list of sampled nodes per input pair.
    """
    r = []

    for top, bottom in pairs:
        n, l, i = top, [], 0
        f = 1  # distance at which the next sample is taken

        while n != bottom and n != nullid:
            p = self.changelog.parents(n)[0]
            if i == f:
                l.append(n)
                f = f * 2  # double the gap between samples
            n = p
            i += 1

        r.append(l)

    return r
def checkpush(self, pushop):
    """Hook point invoked before changesets are pushed.

    The default implementation does nothing.  Extensions override this
    to perform additional pre-push checks, or call it when overriding
    the push command.
    """
@unfilteredpropertycache
def prepushoutgoinghooks(self):
    """Return util.hooks consists of a pushop with repo, remote, outgoing
    methods, which are called before pushing changesets.
    """
    # lazily created and cached on the unfiltered repo by the decorator
    return util.hooks()
def pushkey(self, namespace, key, old, new):
    """Update *key* in *namespace* from *old* to *new*, running the
    'prepushkey' and 'pushkey' hooks around the operation.

    Returns False when a prepushkey hook aborts; otherwise the result
    of pushkey.push().
    """
    try:
        # forward the current transaction's hook arguments, if any
        tr = self.currenttransaction()
        hookargs = {}
        if tr is not None:
            hookargs.update(tr.hookargs)
        hookargs['namespace'] = namespace
        hookargs['key'] = key
        hookargs['old'] = old
        hookargs['new'] = new
        self.hook('prepushkey', throw=True, **hookargs)
    except error.HookAbort as exc:
        self.ui.write_err(_("pushkey-abort: %s\n") % exc)
        if exc.hint:
            self.ui.write_err(_("(%s)\n") % exc.hint)
        return False
    self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
    ret = pushkey.push(self, namespace, key, old, new)
    def runhook():
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
    # the post-push hook runs once the current lock is released
    self._afterlock(runhook)
    return ret
def listkeys(self, namespace):
    """Return the pushkey values for *namespace*, running the
    'prelistkeys' and 'listkeys' hooks around the lookup."""
    self.hook('prelistkeys', throw=True, namespace=namespace)
    self.ui.debug('listing keys for "%s"\n' % namespace)
    values = pushkey.list(self, namespace)
    self.hook('listkeys', namespace=namespace, values=values)
    return values
def debugwireargs(self, one, two, three=None, four=None, five=None):
    '''used to test argument passing over the wire'''
    return ' '.join('%s' % arg for arg in (one, two, three, four, five))
def savecommitmessage(self, text):
    """Persist *text* to .hg/last-message.txt so it survives a failed
    or rolled-back commit; return the file's path relative to the
    current working directory."""
    fp = self.vfs('last-message.txt', 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
    # strip the repo root prefix before converting to a relative path
    return self.pathto(fp.name[len(self.root) + 1:])
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames every (vfs, src, dest) journal
    entry in *files*; the entries are copied up front so the closure
    holds no reference back to the caller's structures."""
    pending = [tuple(entry) for entry in files]
    def dorenames():
        for vfs, src, dest in pending:
            # if src and dest refer to a same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return dorenames
def undoname(fn):
    """Map a journal file path onto the matching undo file path."""
    directory, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(directory, 'undo' + name[len('journal'):])
def instance(ui, path, create):
    # factory used by the repository-opening machinery: open (or create)
    # a local repository at the filesystem path extracted from *path*
    return localrepository(ui, util.urllocalpath(path), create)
def islocal(path):
    # repositories handled by this module are local by definition
    return True
def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.

    Returns a set of requirement strings derived from the repo's ui
    configuration.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2'):
        requirements.add('manifestv2')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)

    return requirements
@@ -1,2063 +1,2128
1 1 # subrepo.py - sub-repository handling for Mercurial
2 2 #
3 3 # Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import copy
11 11 import errno
12 12 import hashlib
13 13 import os
14 14 import posixpath
15 15 import re
16 16 import stat
17 17 import subprocess
18 18 import sys
19 19 import tarfile
20 20 import xml.dom.minidom
21 21
22 22
23 23 from .i18n import _
24 24 from . import (
25 25 cmdutil,
26 26 config,
27 27 encoding,
28 28 error,
29 29 exchange,
30 30 filemerge,
31 31 match as matchmod,
32 32 node,
33 33 pathutil,
34 34 phases,
35 35 pycompat,
36 36 scmutil,
37 37 util,
38 38 vfs as vfsmod,
39 39 )
40 40
41 41 hg = None
42 42 propertycache = util.propertycache
43 43
44 44 nullstate = ('', '', 'empty')
45 45
def _expandedabspath(path):
    '''
    get a path or url and if it is a path expand it and return an absolute path
    '''
    expandedpath = util.urllocalpath(util.expandpath(path))
    u = util.url(expandedpath)
    if not u.scheme:
        # no scheme means a local filesystem path: make it absolute and
        # normalized; URLs are returned unchanged
        path = util.normpath(os.path.abspath(u.path))
    return path
def _getstorehashcachename(remotepath):
    '''get a unique filename for the store hash cache of a remote repository'''
    # normalize the path first so equivalent spellings share one cache
    # entry; 12 hex digits of the sha1 keep the filename short
    return hashlib.sha1(_expandedabspath(remotepath)).hexdigest()[0:12]
class SubrepoAbort(error.Abort):
    """Exception class used to avoid handling a subrepo error more than once"""
    def __init__(self, *args, **kw):
        # path of the subrepo the error originated in, if known
        self.subrepo = kw.pop('subrepo', None)
        # original sys.exc_info() triple that triggered this abort, if any
        self.cause = kw.pop('cause', None)
        error.Abort.__init__(self, *args, **kw)
def annotatesubrepoerror(func):
    """Decorator: re-raise Abort errors from *func* as SubrepoAbort,
    annotated with the subrepo path, so they are reported once."""
    def decoratedmethod(self, *args, **kargs):
        try:
            res = func(self, *args, **kargs)
        except SubrepoAbort as ex:
            # This exception has already been handled
            raise ex
        except error.Abort as ex:
            subrepo = subrelpath(self)
            errormsg = str(ex) + ' ' + _('(in subrepository "%s")') % subrepo
            # avoid handling this exception by raising a SubrepoAbort exception
            raise SubrepoAbort(errormsg, hint=ex.hint, subrepo=subrepo,
                               cause=sys.exc_info())
        return res
    return decoratedmethod
def state(ctx, ui):
    """return a state dict, mapping subrepo paths configured in .hgsub
    to tuple: (source from .hgsub, revision from .hgsubstate, kind
    (key in types dict))
    """
    p = config.config()
    repo = ctx.repo()
    def read(f, sections=None, remap=None):
        # parse file *f* from the changeset into the config object,
        # following %include-style recursion via the read callback
        if f in ctx:
            try:
                data = ctx[f].data()
            except IOError as err:
                if err.errno != errno.ENOENT:
                    raise
                # handle missing subrepo spec files as removed
                ui.warn(_("warning: subrepo spec file \'%s\' not found\n") %
                        repo.pathto(f))
                return
            p.parse(f, data, sections, remap, read)
        else:
            raise error.Abort(_("subrepo spec file \'%s\' not found") %
                              repo.pathto(f))
    if '.hgsub' in ctx:
        read('.hgsub')

    # user-configured [subpaths] remappings take part in parsing
    for path, src in ui.configitems('subpaths'):
        p.set('subpaths', path, src, ui.configsource('subpaths', path))

    rev = {}
    if '.hgsubstate' in ctx:
        try:
            for i, l in enumerate(ctx['.hgsubstate'].data().splitlines()):
                l = l.lstrip()
                if not l:
                    continue
                try:
                    revision, path = l.split(" ", 1)
                except ValueError:
                    raise error.Abort(_("invalid subrepository revision "
                                        "specifier in \'%s\' line %d")
                                      % (repo.pathto('.hgsubstate'), (i + 1)))
                rev[path] = revision
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise

    def remap(src):
        # apply every [subpaths] pattern to *src*, in order
        for pattern, repl in p.items('subpaths'):
            # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub
            # does a string decode.
            repl = util.escapestr(repl)
            # However, we still want to allow back references to go
            # through unharmed, so we turn r'\\1' into r'\1'. Again,
            # extra escapes are needed because re.sub string decodes.
            repl = re.sub(br'\\\\([0-9]+)', br'\\\1', repl)
            try:
                src = re.sub(pattern, repl, src, 1)
            except re.error as e:
                raise error.Abort(_("bad subrepository pattern in %s: %s")
                                  % (p.source('subpaths', pattern), e))
        return src

    state = {}
    for path, src in p[''].items():
        kind = 'hg'
        if src.startswith('['):
            # "[kind]source" syntax selects a non-hg subrepo type
            if ']' not in src:
                raise error.Abort(_('missing ] in subrepository source'))
            kind, src = src.split(']', 1)
            kind = kind[1:]
            src = src.lstrip() # strip any extra whitespace after ']'

        if not util.url(src).isabs():
            parent = _abssource(repo, abort=False)
            if parent:
                parent = util.url(parent)
                parent.path = posixpath.join(parent.path or '', src)
                parent.path = posixpath.normpath(parent.path)
                joined = str(parent)
                # Remap the full joined path and use it if it changes,
                # else remap the original source.
                remapped = remap(joined)
                if remapped == joined:
                    src = remap(src)
                else:
                    src = remapped

        src = remap(src)
        state[util.pconvert(path)] = (src.strip(), rev.get(path, ''), kind)

    return state
def writestate(repo, state):
    """rewrite .hgsubstate in (outer) repo with these subrepo states"""
    lines = []
    for path in sorted(state):
        revision = state[path][1]
        # entries still at the null revision are not recorded
        if revision != nullstate[1]:
            lines.append('%s %s\n' % (revision, path))
    repo.wwrite('.hgsubstate', ''.join(lines), '')
def submerge(repo, wctx, mctx, actx, overwrite, labels=None):
    """delegated from merge.applyupdates: merging of .hgsubstate file
    in working context, merging context and ancestor context

    Returns the merged substate dict, which is also written back to
    .hgsubstate via writestate().
    """
    if mctx == actx: # backwards?
        actx = wctx.p1()
    s1 = wctx.substate
    s2 = mctx.substate
    sa = actx.substate
    sm = {}  # merged substate accumulated below

    repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx))

    def debug(s, msg, r=""):
        if r:
            r = "%s:%s:%s" % r
        repo.ui.debug("  subrepo %s: %s %s\n" % (s, msg, r))

    promptssrc = filemerge.partextras(labels)
    # first pass: every subrepo known to the local (working) context
    for s, l in sorted(s1.iteritems()):
        prompts = None
        a = sa.get(s, nullstate)
        ld = l # local state with possible dirty flag for compares
        if wctx.sub(s).dirty():
            ld = (l[0], l[1] + "+")
        if wctx == actx: # overwrite
            a = ld

        prompts = promptssrc.copy()
        prompts['s'] = s
        if s in s2:
            r = s2[s]
            if ld == r or r == a: # no change or local is newer
                sm[s] = l
                continue
            elif ld == a: # other side changed
                debug(s, "other changed, get", r)
                wctx.sub(s).get(r, overwrite)
                sm[s] = r
            elif ld[0] != r[0]: # sources differ
                prompts['lo'] = l[0]
                prompts['ro'] = r[0]
                if repo.ui.promptchoice(
                    _(' subrepository sources for %(s)s differ\n'
                      'use (l)ocal%(l)s source (%(lo)s)'
                      ' or (r)emote%(o)s source (%(ro)s)?'
                      '$$ &Local $$ &Remote') % prompts, 0):
                    debug(s, "prompt changed, get", r)
                    wctx.sub(s).get(r, overwrite)
                    sm[s] = r
            elif ld[1] == a[1]: # local side is unchanged
                debug(s, "other side changed, get", r)
                wctx.sub(s).get(r, overwrite)
                sm[s] = r
            else:
                # both sides changed: ask the user to merge, or pick a side
                debug(s, "both sides changed")
                srepo = wctx.sub(s)
                prompts['sl'] = srepo.shortid(l[1])
                prompts['sr'] = srepo.shortid(r[1])
                option = repo.ui.promptchoice(
                    _(' subrepository %(s)s diverged (local revision: %(sl)s, '
                      'remote revision: %(sr)s)\n'
                      '(M)erge, keep (l)ocal%(l)s or keep (r)emote%(o)s?'
                      '$$ &Merge $$ &Local $$ &Remote')
                    % prompts, 0)
                if option == 0:
                    wctx.sub(s).merge(r)
                    sm[s] = l
                    debug(s, "merge with", r)
                elif option == 1:
                    sm[s] = l
                    debug(s, "keep local subrepo revision", l)
                else:
                    wctx.sub(s).get(r, overwrite)
                    sm[s] = r
                    debug(s, "get remote subrepo revision", r)
        elif ld == a: # remote removed, local unchanged
            debug(s, "remote removed, remove")
            wctx.sub(s).remove()
        elif a == nullstate: # not present in remote or ancestor
            debug(s, "local added, keep")
            sm[s] = l
            continue
        else:
            # local changed a subrepo the remote removed: ask the user
            if repo.ui.promptchoice(
                _(' local%(l)s changed subrepository %(s)s'
                  ' which remote%(o)s removed\n'
                  'use (c)hanged version or (d)elete?'
                  '$$ &Changed $$ &Delete') % prompts, 0):
                debug(s, "prompt remove")
                wctx.sub(s).remove()

    # second pass: subrepos only present in the merged (remote) context
    for s, r in sorted(s2.items()):
        prompts = None
        if s in s1:
            continue
        elif s not in sa:
            debug(s, "remote added, get", r)
            mctx.sub(s).get(r)
            sm[s] = r
        elif r != sa[s]:
            # remote changed a subrepo the local side removed: ask the user
            prompts = promptssrc.copy()
            prompts['s'] = s
            if repo.ui.promptchoice(
                _(' remote%(o)s changed subrepository %(s)s'
                  ' which local%(l)s removed\n'
                  'use (c)hanged version or (d)elete?'
                  '$$ &Changed $$ &Delete') % prompts, 0) == 0:
                debug(s, "prompt recreate", r)
                mctx.sub(s).get(r)
                sm[s] = r

    # record merged .hgsubstate
    writestate(repo, sm)
    return sm
295 295
def precommit(ui, wctx, status, match, force=False):
    """Calculate .hgsubstate changes that should be applied before committing

    Returns (subs, commitsubs, newstate) where
    - subs: changed subrepos (including dirty ones)
    - commitsubs: dirty subrepos which the caller needs to commit recursively
    - newstate: new state dict which the caller must write to .hgsubstate

    This also updates the given status argument.
    """
    changed = []
    dirtysubs = set()
    newstate = wctx.substate.copy()

    # only manage subrepos and .hgsubstate if .hgsub is present
    if '.hgsub' in wctx:
        # tracking of .hgsubstate is decided here, so drop it from the
        # caller-computed status lists
        for filelist in (status.modified, status.added, status.removed):
            if '.hgsubstate' in filelist:
                filelist.remove('.hgsubstate')

        # rebuild the substate from the last committed state, folding in
        # working-copy changes for matched subrepos
        oldstate = wctx.p1().substate
        for s in sorted(newstate.keys()):
            if not match(s):
                # excluded from this commit: keep old state if present
                if s in oldstate:
                    newstate[s] = oldstate[s]
                    continue
                if not force:
                    raise error.Abort(
                        _("commit with new subrepo %s excluded") % s)
            dirtyreason = wctx.sub(s).dirtyreason(True)
            if not dirtyreason:
                # clean subrepo: record its base state, and note it as
                # changed when that differs from the old committed state
                bs = wctx.sub(s).basestate()
                newstate[s] = (newstate[s][0], bs, newstate[s][2])
                if oldstate.get(s, (None, None, None))[1] != bs:
                    changed.append(s)
            else:
                if not ui.configbool('ui', 'commitsubrepos'):
                    raise error.Abort(dirtyreason,
                        hint=_("use --subrepos for recursive commit"))
                changed.append(s)
                dirtysubs.add(s)

        # subrepos removed since the parent revision(s)
        for p in wctx.parents():
            gone = (s for s in p.substate if s not in newstate)
            changed += [s for s in gone if match(s)]
        if changed:
            if (not match('.hgsub') and
                '.hgsub' in (wctx.modified() + wctx.added())):
                raise error.Abort(_("can't commit subrepos without .hgsub"))
            status.modified.insert(0, '.hgsubstate')

    elif '.hgsub' in status.removed:
        # .hgsub was removed: drop .hgsubstate with it unless the status
        # already accounts for it
        if ('.hgsubstate' in wctx and
            '.hgsubstate' not in (status.modified + status.added +
                                  status.removed)):
            status.removed.insert(0, '.hgsubstate')

    return changed, dirtysubs, newstate
360
def _updateprompt(ui, sub, dirty, local, remote):
    """Prompt the user to pick the local or remote subrepo source.

    Returns the promptchoice() result (0 for local, 1 for remote).
    """
    if dirty:
        template = _(' subrepository sources for %s differ\n'
                     'use (l)ocal source (%s) or (r)emote source (%s)?'
                     '$$ &Local $$ &Remote')
    else:
        template = _(' subrepository sources for %s differ (in checked out '
                     'version)\n'
                     'use (l)ocal source (%s) or (r)emote source (%s)?'
                     '$$ &Local $$ &Remote')
    return ui.promptchoice(template % (subrelpath(sub), local, remote), 0)
309 374
def reporelpath(repo):
    """return path to this (sub)repo as seen from outermost repo"""
    # climb the _subparent chain up to the outermost repository
    outer = repo
    while util.safehasattr(outer, '_subparent'):
        outer = outer._subparent
    prefix = pathutil.normasprefix(outer.root)
    return repo.root[len(prefix):]
316 381
def subrelpath(sub):
    """return path to this subrepo as seen from outermost repo"""
    return sub._relpath
320 385
def _abssource(repo, push=False, abort=True):
    """return pull/push path of repo - either based on parent repo .hgsub info
    or on the top repo config. Abort or return None if no source found.

    ``push`` selects the 'default-push' path over 'default' at the top
    repo. With ``abort`` false, returns None instead of raising when no
    source can be determined (used by the recursive calls below).
    """
    if util.safehasattr(repo, '_subparent'):
        # we are a subrepo: resolve a relative source against the parent's
        # own source, recursing up the chain
        source = util.url(repo._subsource)
        if source.isabs():
            return str(source)
        source.path = posixpath.normpath(source.path)
        parent = _abssource(repo._subparent, push, abort=False)
        if parent:
            parent = util.url(util.pconvert(parent))
            parent.path = posixpath.join(parent.path or '', source.path)
            parent.path = posixpath.normpath(parent.path)
            return str(parent)
    else: # recursion reached top repo
        if util.safehasattr(repo, '_subtoppath'):
            return repo._subtoppath
        if push and repo.ui.config('paths', 'default-push'):
            return repo.ui.config('paths', 'default-push')
        if repo.ui.config('paths', 'default'):
            return repo.ui.config('paths', 'default')
        if repo.shared():
            # chop off the .hg component to get the default path form
            return os.path.dirname(repo.sharedpath)
    if abort:
        raise error.Abort(_("default path for subrepository not found"))
347 412
def _sanitize(ui, vfs, ignore):
    """Remove potentially hostile 'hgrc' files under '.hg' directories.

    Directories whose lowercased name equals ``ignore`` are pruned from
    the walk.
    """
    for dirname, dirs, names in vfs.walk():
        # prune the first matching directory; mutating 'dirs' in place
        # keeps the walker from descending into it
        for idx, entry in enumerate(dirs):
            if entry.lower() == ignore:
                del dirs[idx]
                break
        if vfs.basename(dirname).lower() != '.hg':
            continue
        for name in names:
            if name.lower() == 'hgrc':
                ui.warn(_("warning: removing potentially hostile 'hgrc' "
                          "in '%s'\n") % vfs.join(dirname))
                vfs.unlink(vfs.reljoin(dirname, name))
361 426
def _auditsubrepopath(repo, path):
    """Abort if the subrepo path fails auditing or is itself a symlink."""
    auditor = pathutil.pathauditor(repo.root)
    auditor(path)
    # the auditor does not check whether the path itself is a symlink
    if repo.wvfs.islink(path):
        raise error.Abort(_("subrepo '%s' traverses symbolic link") % path)
367 432
# Default per-type values for the 'subrepos.<type>:allowed' config knob:
# only Mercurial subrepos are allowed out of the box; unknown types
# default to False (see the lookup in _checktype).
SUBREPO_ALLOWED_DEFAULTS = {
    'hg': True,
    'git': False,
    'svn': False,
}
373 438
def _checktype(ui, kind):
    """Abort unless subrepos of the given kind are enabled and known."""
    # 'subrepos.allowed' is the master kill switch: when off, every
    # subrepo type is disabled, period
    if not ui.configbool('subrepos', 'allowed', True):
        raise error.Abort(_('subrepos not enabled'),
                          hint=_("see 'hg help config.subrepos' for details"))

    # per-type opt-in/opt-out, with conservative defaults
    allowedbydefault = SUBREPO_ALLOWED_DEFAULTS.get(kind, False)
    if not ui.configbool('subrepos', '%s:allowed' % kind, allowedbydefault):
        raise error.Abort(_('%s subrepos not allowed') % kind,
                          hint=_("see 'hg help config.subrepos' for details"))

    if kind not in types:
        raise error.Abort(_('unknown subrepo type %s') % kind)
388 453
def subrepo(ctx, path, allowwdir=False, allowcreate=True):
    """return instance of the right subrepo class for subrepo in path

    With ``allowwdir``, the revision recorded in the substate is replaced
    by ctx.subrev(path). ``allowcreate`` is forwarded to the subrepo class
    constructor.
    """
    # subrepo inherently violates our import layering rules
    # because it wants to make repo objects from deep inside the stack
    # so we manually delay the circular imports to not break
    # scripts that don't use our demand-loading
    global hg
    from . import hg as h
    hg = h

    repo = ctx.repo()
    # refuse symlinked or otherwise suspicious paths, and disabled types
    _auditsubrepopath(repo, path)
    state = ctx.substate[path]
    _checktype(repo.ui, state[2])
    if allowwdir:
        state = (state[0], ctx.subrev(path), state[2])
    # state[2] is the subrepo kind; 'types' maps it to the subrepo class
    return types[state[2]](ctx, path, state[:2], allowcreate)
406 471
def nullsubrepo(ctx, path, pctx):
    """return an empty subrepo in pctx for the extant subrepo in ctx"""
    # subrepo inherently violates our import layering rules
    # because it wants to make repo objects from deep inside the stack
    # so we manually delay the circular imports to not break
    # scripts that don't use our demand-loading
    global hg
    from . import hg as h
    hg = h

    repo = ctx.repo()
    # refuse symlinked or otherwise suspicious paths, and disabled types
    _auditsubrepopath(repo, path)
    state = ctx.substate[path]
    _checktype(repo.ui, state[2])
    subrev = ''
    if state[2] == 'hg':
        # the hg null revision, as 40 hex zeroes
        subrev = "0" * 40
    return types[state[2]](pctx, path, (state[0], subrev), True)
425 490
def newcommitphase(ui, ctx):
    """Return the phase for a new commit of ctx, honoring subrepo phases.

    Depending on 'phases.checksubrepos' ('ignore', 'follow' or 'abort'),
    the result may be raised to the highest phase found among the
    subrepository revisions, or the commit may be aborted.
    """
    commitphase = phases.newcommitphase(ui)
    substate = getattr(ctx, "substate", None)
    if not substate:
        # no subrepos involved: the plain new-commit phase applies
        return commitphase
    check = ui.config('phases', 'checksubrepos')
    if check not in ('ignore', 'follow', 'abort'):
        raise error.Abort(_('invalid phases.checksubrepos configuration: %s')
                          % (check))
    if check == 'ignore':
        return commitphase
    # find the highest phase among all subrepo revisions
    maxphase, maxsub = phases.public, None
    for subpath in sorted(substate):
        subphase = ctx.sub(subpath).phase(substate[subpath][1])
        if subphase > maxphase:
            maxphase, maxsub = subphase, subpath
    if commitphase >= maxphase:
        return commitphase
    if check == 'abort':
        raise error.Abort(_("can't commit in %s phase"
                            " conflicting %s from subrepository %s") %
                          (phases.phasenames[commitphase],
                           phases.phasenames[maxphase], maxsub))
    ui.warn(_("warning: changes are committed in"
              " %s phase from subrepository %s\n") %
            (phases.phasenames[maxphase], maxsub))
    return maxphase
456 521
457 522 # subrepo classes need to implement the following abstract class:
458 523
class abstractsubrepo(object):
    """Abstract base for subrepo implementations (hg, git, svn).

    Subclasses override the NotImplementedError methods; the remaining
    methods provide neutral defaults for unsupported operations.
    """

    def __init__(self, ctx, path):
        """Initialize abstractsubrepo part

        ``ctx`` is the context referring this subrepository in the
        parent repository.

        ``path`` is the path to this subrepository as seen from
        innermost repository.
        """
        self.ui = ctx.repo().ui
        self._ctx = ctx
        self._path = path

    def addwebdirpath(self, serverpath, webconf):
        """Add the hgwebdir entries for this subrepo, and any of its subrepos.

        ``serverpath`` is the path component of the URL for this repo.

        ``webconf`` is the dictionary of hgwebdir entries.
        """
        pass

    def storeclean(self, path):
        """
        returns true if the repository has not changed since it was last
        cloned from or pushed to a given repository.
        """
        return False

    def dirty(self, ignoreupdate=False, missing=False):
        """returns true if the dirstate of the subrepo is dirty or does not
        match current stored state. If ignoreupdate is true, only check
        whether the subrepo has uncommitted changes in its dirstate. If missing
        is true, check for deleted files.
        """
        raise NotImplementedError

    def dirtyreason(self, ignoreupdate=False, missing=False):
        """return reason string if it is ``dirty()``

        Returned string should have enough information for the message
        of exception.

        This returns None, otherwise.
        """
        if self.dirty(ignoreupdate=ignoreupdate, missing=missing):
            return _('uncommitted changes in subrepository "%s"'
                     ) % subrelpath(self)

    def bailifchanged(self, ignoreupdate=False, hint=None):
        """raise Abort if subrepository is ``dirty()``
        """
        dirtyreason = self.dirtyreason(ignoreupdate=ignoreupdate,
                                       missing=True)
        if dirtyreason:
            raise error.Abort(dirtyreason, hint=hint)

    def basestate(self):
        """current working directory base state, disregarding .hgsubstate
        state and working directory modifications"""
        raise NotImplementedError

    def checknested(self, path):
        """check if path is a subrepository within this repository"""
        return False

    def commit(self, text, user, date):
        """commit the current changes to the subrepo with the given
        log message. Use given user and date if possible. Return the
        new state of the subrepo.
        """
        raise NotImplementedError

    def phase(self, state):
        """returns phase of specified state in the subrepository.
        """
        return phases.public

    def remove(self):
        """remove the subrepo

        (should verify the dirstate is not dirty first)
        """
        raise NotImplementedError

    def get(self, state, overwrite=False):
        """run whatever commands are needed to put the subrepo into
        this state
        """
        raise NotImplementedError

    def merge(self, state):
        """merge currently-saved state with the new state."""
        raise NotImplementedError

    def push(self, opts):
        """perform whatever action is analogous to 'hg push'

        This may be a no-op on some systems.
        """
        raise NotImplementedError

    def add(self, ui, match, prefix, explicitonly, **opts):
        """add matched files; default implementation adds nothing"""
        return []

    def addremove(self, matcher, prefix, opts, dry_run, similarity):
        """addremove is unsupported by default; returns 1 (warning)"""
        self.ui.warn("%s: %s" % (prefix, _("addremove is not supported")))
        return 1

    def cat(self, match, fm, fntemplate, prefix, **opts):
        """cat matched files; default returns 1 (nothing catted)"""
        return 1

    def status(self, rev2, **opts):
        """return an empty status by default"""
        return scmutil.status([], [], [], [], [], [], [])

    def diff(self, ui, diffopts, node2, match, prefix, **opts):
        """show diffs; no-op by default"""
        pass

    def outgoing(self, ui, dest, opts):
        """show outgoing changes; default returns 1 (none found)"""
        return 1

    def incoming(self, ui, source, opts):
        """show incoming changes; default returns 1 (none found)"""
        return 1

    def files(self):
        """return filename iterator"""
        raise NotImplementedError

    def filedata(self, name, decode):
        """return file data, optionally passed through repo decoders"""
        raise NotImplementedError

    def fileflags(self, name):
        """return file flags"""
        return ''

    def getfileset(self, expr):
        """Resolve the fileset expression for this repo"""
        return set()

    def printfiles(self, ui, m, fm, fmt, subrepos):
        """handle the files command for this subrepo"""
        return 1

    def archive(self, archiver, prefix, match=None, decode=True):
        """archive this subrepo's files via the given archiver.

        Returns the number of files archived.
        """
        if match is not None:
            files = [f for f in self.files() if match(f)]
        else:
            files = self.files()
        total = len(files)
        relpath = subrelpath(self)
        self.ui.progress(_('archiving (%s)') % relpath, 0,
                         unit=_('files'), total=total)
        for i, name in enumerate(files):
            flags = self.fileflags(name)
            # executable bit maps to 0o755, everything else to 0o644
            mode = 'x' in flags and 0o755 or 0o644
            symlink = 'l' in flags
            archiver.addfile(prefix + self._path + '/' + name,
                             mode, symlink, self.filedata(name, decode))
            self.ui.progress(_('archiving (%s)') % relpath, i + 1,
                             unit=_('files'), total=total)
        self.ui.progress(_('archiving (%s)') % relpath, None)
        return total

    def walk(self, match):
        '''
        walk recursively through the directory tree, finding all files
        matched by the match function
        '''

    def forget(self, match, prefix):
        """forget matched files; default forgets nothing"""
        return ([], [])

    def removefiles(self, matcher, prefix, after, force, subrepos, warnings):
        """remove the matched files from the subrepository and the filesystem,
        possibly by force and/or after the file has been removed from the
        filesystem. Return 0 on success, 1 on any warning.
        """
        warnings.append(_("warning: removefiles not implemented (%s)")
                        % self._path)
        return 1

    def revert(self, substate, *pats, **opts):
        """revert is unsupported by default; warns and reverts nothing"""
        self.ui.warn(_('%s: reverting %s subrepos is unsupported\n') \
            % (substate[0], substate[2]))
        return []

    def shortid(self, revid):
        """return a short form of a revision id; identity by default"""
        return revid

    def unshare(self):
        '''
        convert this repository from shared to normal storage.
        '''

    def verify(self):
        '''verify the integrity of the repository. Return 0 on success or
        warning, 1 on any error.
        '''
        return 0

    @propertycache
    def wvfs(self):
        """return vfs to access the working directory of this subrepository
        """
        return vfsmod.vfs(self._ctx.repo().wvfs.join(self._path))

    @propertycache
    def _relpath(self):
        """return path to this subrepository as seen from outermost repository
        """
        return self.wvfs.reljoin(reporelpath(self._ctx.repo()), self._path)
673 738
674 739 class hgsubrepo(abstractsubrepo):
675 740 def __init__(self, ctx, path, state, allowcreate):
676 741 super(hgsubrepo, self).__init__(ctx, path)
677 742 self._state = state
678 743 r = ctx.repo()
679 744 root = r.wjoin(path)
680 745 create = allowcreate and not r.wvfs.exists('%s/.hg' % path)
681 746 self._repo = hg.repository(r.baseui, root, create=create)
682 747
683 748 # Propagate the parent's --hidden option
684 749 if r is r.unfiltered():
685 750 self._repo = self._repo.unfiltered()
686 751
687 752 self.ui = self._repo.ui
688 753 for s, k in [('ui', 'commitsubrepos')]:
689 754 v = r.ui.config(s, k)
690 755 if v:
691 756 self.ui.setconfig(s, k, v, 'subrepo')
692 757 # internal config: ui._usedassubrepo
693 758 self.ui.setconfig('ui', '_usedassubrepo', 'True', 'subrepo')
694 759 self._initrepo(r, state[0], create)
695 760
696 761 @annotatesubrepoerror
697 762 def addwebdirpath(self, serverpath, webconf):
698 763 cmdutil.addwebdirpath(self._repo, subrelpath(self), webconf)
699 764
700 765 def storeclean(self, path):
701 766 with self._repo.lock():
702 767 return self._storeclean(path)
703 768
704 769 def _storeclean(self, path):
705 770 clean = True
706 771 itercache = self._calcstorehash(path)
707 772 for filehash in self._readstorehashcache(path):
708 773 if filehash != next(itercache, None):
709 774 clean = False
710 775 break
711 776 if clean:
712 777 # if not empty:
713 778 # the cached and current pull states have a different size
714 779 clean = next(itercache, None) is None
715 780 return clean
716 781
717 782 def _calcstorehash(self, remotepath):
718 783 '''calculate a unique "store hash"
719 784
720 785 This method is used to to detect when there are changes that may
721 786 require a push to a given remote path.'''
722 787 # sort the files that will be hashed in increasing (likely) file size
723 788 filelist = ('bookmarks', 'store/phaseroots', 'store/00changelog.i')
724 789 yield '# %s\n' % _expandedabspath(remotepath)
725 790 vfs = self._repo.vfs
726 791 for relname in filelist:
727 792 filehash = hashlib.sha1(vfs.tryread(relname)).hexdigest()
728 793 yield '%s = %s\n' % (relname, filehash)
729 794
730 795 @propertycache
731 796 def _cachestorehashvfs(self):
732 797 return vfsmod.vfs(self._repo.vfs.join('cache/storehash'))
733 798
734 799 def _readstorehashcache(self, remotepath):
735 800 '''read the store hash cache for a given remote repository'''
736 801 cachefile = _getstorehashcachename(remotepath)
737 802 return self._cachestorehashvfs.tryreadlines(cachefile, 'r')
738 803
739 804 def _cachestorehash(self, remotepath):
740 805 '''cache the current store hash
741 806
742 807 Each remote repo requires its own store hash cache, because a subrepo
743 808 store may be "clean" versus a given remote repo, but not versus another
744 809 '''
745 810 cachefile = _getstorehashcachename(remotepath)
746 811 with self._repo.lock():
747 812 storehash = list(self._calcstorehash(remotepath))
748 813 vfs = self._cachestorehashvfs
749 814 vfs.writelines(cachefile, storehash, mode='w', notindexed=True)
750 815
751 816 def _getctx(self):
752 817 '''fetch the context for this subrepo revision, possibly a workingctx
753 818 '''
754 819 if self._ctx.rev() is None:
755 820 return self._repo[None] # workingctx if parent is workingctx
756 821 else:
757 822 rev = self._state[1]
758 823 return self._repo[rev]
759 824
760 825 @annotatesubrepoerror
761 826 def _initrepo(self, parentrepo, source, create):
762 827 self._repo._subparent = parentrepo
763 828 self._repo._subsource = source
764 829
765 830 if create:
766 831 lines = ['[paths]\n']
767 832
768 833 def addpathconfig(key, value):
769 834 if value:
770 835 lines.append('%s = %s\n' % (key, value))
771 836 self.ui.setconfig('paths', key, value, 'subrepo')
772 837
773 838 defpath = _abssource(self._repo, abort=False)
774 839 defpushpath = _abssource(self._repo, True, abort=False)
775 840 addpathconfig('default', defpath)
776 841 if defpath != defpushpath:
777 842 addpathconfig('default-push', defpushpath)
778 843
779 844 fp = self._repo.vfs("hgrc", "w", text=True)
780 845 try:
781 846 fp.write(''.join(lines))
782 847 finally:
783 848 fp.close()
784 849
785 850 @annotatesubrepoerror
786 851 def add(self, ui, match, prefix, explicitonly, **opts):
787 852 return cmdutil.add(ui, self._repo, match,
788 853 self.wvfs.reljoin(prefix, self._path),
789 854 explicitonly, **opts)
790 855
791 856 @annotatesubrepoerror
792 857 def addremove(self, m, prefix, opts, dry_run, similarity):
793 858 # In the same way as sub directories are processed, once in a subrepo,
794 859 # always entry any of its subrepos. Don't corrupt the options that will
795 860 # be used to process sibling subrepos however.
796 861 opts = copy.copy(opts)
797 862 opts['subrepos'] = True
798 863 return scmutil.addremove(self._repo, m,
799 864 self.wvfs.reljoin(prefix, self._path), opts,
800 865 dry_run, similarity)
801 866
802 867 @annotatesubrepoerror
803 868 def cat(self, match, fm, fntemplate, prefix, **opts):
804 869 rev = self._state[1]
805 870 ctx = self._repo[rev]
806 871 return cmdutil.cat(self.ui, self._repo, ctx, match, fm, fntemplate,
807 872 prefix, **opts)
808 873
809 874 @annotatesubrepoerror
810 875 def status(self, rev2, **opts):
811 876 try:
812 877 rev1 = self._state[1]
813 878 ctx1 = self._repo[rev1]
814 879 ctx2 = self._repo[rev2]
815 880 return self._repo.status(ctx1, ctx2, **opts)
816 881 except error.RepoLookupError as inst:
817 882 self.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
818 883 % (inst, subrelpath(self)))
819 884 return scmutil.status([], [], [], [], [], [], [])
820 885
821 886 @annotatesubrepoerror
822 887 def diff(self, ui, diffopts, node2, match, prefix, **opts):
823 888 try:
824 889 node1 = node.bin(self._state[1])
825 890 # We currently expect node2 to come from substate and be
826 891 # in hex format
827 892 if node2 is not None:
828 893 node2 = node.bin(node2)
829 894 cmdutil.diffordiffstat(ui, self._repo, diffopts,
830 895 node1, node2, match,
831 896 prefix=posixpath.join(prefix, self._path),
832 897 listsubrepos=True, **opts)
833 898 except error.RepoLookupError as inst:
834 899 self.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
835 900 % (inst, subrelpath(self)))
836 901
837 902 @annotatesubrepoerror
838 903 def archive(self, archiver, prefix, match=None, decode=True):
839 904 self._get(self._state + ('hg',))
840 905 total = abstractsubrepo.archive(self, archiver, prefix, match)
841 906 rev = self._state[1]
842 907 ctx = self._repo[rev]
843 908 for subpath in ctx.substate:
844 909 s = subrepo(ctx, subpath, True)
845 910 submatch = matchmod.subdirmatcher(subpath, match)
846 911 total += s.archive(archiver, prefix + self._path + '/', submatch,
847 912 decode)
848 913 return total
849 914
850 915 @annotatesubrepoerror
851 916 def dirty(self, ignoreupdate=False, missing=False):
852 917 r = self._state[1]
853 918 if r == '' and not ignoreupdate: # no state recorded
854 919 return True
855 920 w = self._repo[None]
856 921 if r != w.p1().hex() and not ignoreupdate:
857 922 # different version checked out
858 923 return True
859 924 return w.dirty(missing=missing) # working directory changed
860 925
861 926 def basestate(self):
862 927 return self._repo['.'].hex()
863 928
864 929 def checknested(self, path):
865 930 return self._repo._checknested(self._repo.wjoin(path))
866 931
867 932 @annotatesubrepoerror
868 933 def commit(self, text, user, date):
869 934 # don't bother committing in the subrepo if it's only been
870 935 # updated
871 936 if not self.dirty(True):
872 937 return self._repo['.'].hex()
873 938 self.ui.debug("committing subrepo %s\n" % subrelpath(self))
874 939 n = self._repo.commit(text, user, date)
875 940 if not n:
876 941 return self._repo['.'].hex() # different version checked out
877 942 return node.hex(n)
878 943
879 944 @annotatesubrepoerror
880 945 def phase(self, state):
881 946 return self._repo[state].phase()
882 947
883 948 @annotatesubrepoerror
884 949 def remove(self):
885 950 # we can't fully delete the repository as it may contain
886 951 # local-only history
887 952 self.ui.note(_('removing subrepo %s\n') % subrelpath(self))
888 953 hg.clean(self._repo, node.nullid, False)
889 954
890 955 def _get(self, state):
891 956 source, revision, kind = state
892 957 parentrepo = self._repo._subparent
893 958
894 959 if revision in self._repo.unfiltered():
895 960 # Allow shared subrepos tracked at null to setup the sharedpath
896 961 if len(self._repo) != 0 or not parentrepo.shared():
897 962 return True
898 963 self._repo._subsource = source
899 964 srcurl = _abssource(self._repo)
900 965 other = hg.peer(self._repo, {}, srcurl)
901 966 if len(self._repo) == 0:
902 967 # use self._repo.vfs instead of self.wvfs to remove .hg only
903 968 self._repo.vfs.rmtree()
904 969 if parentrepo.shared():
905 970 self.ui.status(_('sharing subrepo %s from %s\n')
906 971 % (subrelpath(self), srcurl))
907 972 shared = hg.share(self._repo._subparent.baseui,
908 973 other, self._repo.root,
909 974 update=False, bookmarks=False)
910 975 self._repo = shared.local()
911 976 else:
912 977 self.ui.status(_('cloning subrepo %s from %s\n')
913 978 % (subrelpath(self), srcurl))
914 979 other, cloned = hg.clone(self._repo._subparent.baseui, {},
915 980 other, self._repo.root,
916 981 update=False)
917 982 self._repo = cloned.local()
918 983 self._initrepo(parentrepo, source, create=True)
919 984 self._cachestorehash(srcurl)
920 985 else:
921 986 self.ui.status(_('pulling subrepo %s from %s\n')
922 987 % (subrelpath(self), srcurl))
923 988 cleansub = self.storeclean(srcurl)
924 989 exchange.pull(self._repo, other)
925 990 if cleansub:
926 991 # keep the repo clean after pull
927 992 self._cachestorehash(srcurl)
928 993 return False
929 994
930 995 @annotatesubrepoerror
931 996 def get(self, state, overwrite=False):
932 997 inrepo = self._get(state)
933 998 source, revision, kind = state
934 999 repo = self._repo
935 1000 repo.ui.debug("getting subrepo %s\n" % self._path)
936 1001 if inrepo:
937 1002 urepo = repo.unfiltered()
938 1003 ctx = urepo[revision]
939 1004 if ctx.hidden():
940 1005 urepo.ui.warn(
941 1006 _('revision %s in subrepository "%s" is hidden\n') \
942 1007 % (revision[0:12], self._path))
943 1008 repo = urepo
944 1009 hg.updaterepo(repo, revision, overwrite)
945 1010
946 1011 @annotatesubrepoerror
947 1012 def merge(self, state):
948 1013 self._get(state)
949 1014 cur = self._repo['.']
950 1015 dst = self._repo[state[1]]
951 1016 anc = dst.ancestor(cur)
952 1017
953 1018 def mergefunc():
954 1019 if anc == cur and dst.branch() == cur.branch():
955 1020 self.ui.debug('updating subrepository "%s"\n'
956 1021 % subrelpath(self))
957 1022 hg.update(self._repo, state[1])
958 1023 elif anc == dst:
959 1024 self.ui.debug('skipping subrepository "%s"\n'
960 1025 % subrelpath(self))
961 1026 else:
962 1027 self.ui.debug('merging subrepository "%s"\n' % subrelpath(self))
963 1028 hg.merge(self._repo, state[1], remind=False)
964 1029
965 1030 wctx = self._repo[None]
966 1031 if self.dirty():
967 1032 if anc != dst:
968 1033 if _updateprompt(self.ui, self, wctx.dirty(), cur, dst):
969 1034 mergefunc()
970 1035 else:
971 1036 mergefunc()
972 1037 else:
973 1038 mergefunc()
974 1039
    @annotatesubrepoerror
    def push(self, opts):
        """Push this subrepo (and its own subrepos, depth-first).

        Returns False if a nested subrepo push failed, None when there is
        nothing to push (store is clean since the last push), otherwise
        the changegroup push result (``cgresult``).
        """
        force = opts.get('force')
        newbranch = opts.get('new_branch')
        ssh = opts.get('ssh')

        # push subrepos depth-first for coherent ordering
        c = self._repo['']
        subs = c.substate # only repos that are committed
        for s in sorted(subs):
            if c.sub(s).push(opts) == 0:
                return False

        dsturl = _abssource(self._repo, True)
        if not force:
            # the store-hash cache lets us skip a push that cannot change
            # anything on the destination
            if self.storeclean(dsturl):
                self.ui.status(
                    _('no changes made to subrepo %s since last push to %s\n')
                    % (subrelpath(self), dsturl))
                return None
        self.ui.status(_('pushing subrepo %s to %s\n') %
                       (subrelpath(self), dsturl))
        other = hg.peer(self._repo, {'ssh': ssh}, dsturl)
        res = exchange.push(self._repo, other, force, newbranch=newbranch)

        # the repo is now clean
        self._cachestorehash(dsturl)
        return res.cgresult
1003 1068
1004 1069 @annotatesubrepoerror
1005 1070 def outgoing(self, ui, dest, opts):
1006 1071 if 'rev' in opts or 'branch' in opts:
1007 1072 opts = copy.copy(opts)
1008 1073 opts.pop('rev', None)
1009 1074 opts.pop('branch', None)
1010 1075 return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts)
1011 1076
1012 1077 @annotatesubrepoerror
1013 1078 def incoming(self, ui, source, opts):
1014 1079 if 'rev' in opts or 'branch' in opts:
1015 1080 opts = copy.copy(opts)
1016 1081 opts.pop('rev', None)
1017 1082 opts.pop('branch', None)
1018 1083 return hg.incoming(ui, self._repo, _abssource(self._repo, False), opts)
1019 1084
1020 1085 @annotatesubrepoerror
1021 1086 def files(self):
1022 1087 rev = self._state[1]
1023 1088 ctx = self._repo[rev]
1024 1089 return ctx.manifest().keys()
1025 1090
1026 1091 def filedata(self, name, decode):
1027 1092 rev = self._state[1]
1028 1093 data = self._repo[rev][name].data()
1029 1094 if decode:
1030 1095 data = self._repo.wwritedata(name, data)
1031 1096 return data
1032 1097
1033 1098 def fileflags(self, name):
1034 1099 rev = self._state[1]
1035 1100 ctx = self._repo[rev]
1036 1101 return ctx.flags(name)
1037 1102
1038 1103 @annotatesubrepoerror
1039 1104 def printfiles(self, ui, m, fm, fmt, subrepos):
1040 1105 # If the parent context is a workingctx, use the workingctx here for
1041 1106 # consistency.
1042 1107 if self._ctx.rev() is None:
1043 1108 ctx = self._repo[None]
1044 1109 else:
1045 1110 rev = self._state[1]
1046 1111 ctx = self._repo[rev]
1047 1112 return cmdutil.files(ui, ctx, m, fm, fmt, subrepos)
1048 1113
1049 1114 @annotatesubrepoerror
1050 1115 def getfileset(self, expr):
1051 1116 if self._ctx.rev() is None:
1052 1117 ctx = self._repo[None]
1053 1118 else:
1054 1119 rev = self._state[1]
1055 1120 ctx = self._repo[rev]
1056 1121
1057 1122 files = ctx.getfileset(expr)
1058 1123
1059 1124 for subpath in ctx.substate:
1060 1125 sub = ctx.sub(subpath)
1061 1126
1062 1127 try:
1063 1128 files.extend(subpath + '/' + f for f in sub.getfileset(expr))
1064 1129 except error.LookupError:
1065 1130 self.ui.status(_("skipping missing subrepository: %s\n")
1066 1131 % self.wvfs.reljoin(reporelpath(self), subpath))
1067 1132 return files
1068 1133
1069 1134 def walk(self, match):
1070 1135 ctx = self._repo[None]
1071 1136 return ctx.walk(match)
1072 1137
1073 1138 @annotatesubrepoerror
1074 1139 def forget(self, match, prefix):
1075 1140 return cmdutil.forget(self.ui, self._repo, match,
1076 1141 self.wvfs.reljoin(prefix, self._path), True)
1077 1142
1078 1143 @annotatesubrepoerror
1079 1144 def removefiles(self, matcher, prefix, after, force, subrepos, warnings):
1080 1145 return cmdutil.remove(self.ui, self._repo, matcher,
1081 1146 self.wvfs.reljoin(prefix, self._path),
1082 1147 after, force, subrepos)
1083 1148
    @annotatesubrepoerror
    def revert(self, substate, *pats, **opts):
        """Revert the subrepo to the state recorded in ``substate``."""
        # reverting a subrepo is a 2 step process:
        # 1. if the no_backup is not set, revert all modified
        # files inside the subrepo
        # 2. update the subrepo to the revision specified in
        # the corresponding substate dictionary
        self.ui.status(_('reverting subrepo %s\n') % substate[0])
        if not opts.get('no_backup'):
            # Revert all files on the subrepo, creating backups
            # Note that this will not recursively revert subrepos
            # We could do it if there was a set:subrepos() predicate
            # (copy opts first: filerevert needs date cleared and the
            # target revision injected without touching the caller's dict)
            opts = opts.copy()
            opts['date'] = None
            opts['rev'] = substate[1]

            self.filerevert(*pats, **opts)

        # Update the repo to the revision specified in the given substate
        if not opts.get('dry_run'):
            self.get(substate, overwrite=True)
1105 1170
1106 1171 def filerevert(self, *pats, **opts):
1107 1172 ctx = self._repo[opts['rev']]
1108 1173 parents = self._repo.dirstate.parents()
1109 1174 if opts.get('all'):
1110 1175 pats = ['set:modified()']
1111 1176 else:
1112 1177 pats = []
1113 1178 cmdutil.revert(self.ui, self._repo, ctx, parents, *pats, **opts)
1114 1179
1115 1180 def shortid(self, revid):
1116 1181 return revid[:12]
1117 1182
    @annotatesubrepoerror
    def unshare(self):
        """Convert a shared subrepo into a full standalone copy."""
        # subrepo inherently violates our import layering rules
        # because it wants to make repo objects from deep inside the stack
        # so we manually delay the circular imports to not break
        # scripts that don't use our demand-loading
        global hg
        from . import hg as h
        hg = h

        # Nothing prevents a user from sharing in a repo, and then making that a
        # subrepo. Alternately, the previous unshare attempt may have failed
        # part way through. So recurse whether or not this layer is shared.
        if self._repo.shared():
            self.ui.status(_("unsharing subrepo '%s'\n") % self._relpath)

        # hg.unshare is called unconditionally; it is expected to handle the
        # already-unshared case itself
        hg.unshare(self.ui, self._repo)
1135 1200
    def verify(self):
        """Warn about problematic subrepo states; always returns 0.

        Warns when the recorded revision is hidden (it won't be exchanged)
        or missing entirely (it may simply need to be pulled); neither is
        treated as an error.
        """
        try:
            rev = self._state[1]
            ctx = self._repo.unfiltered()[rev]
            if ctx.hidden():
                # Since hidden revisions aren't pushed/pulled, it seems worth an
                # explicit warning.
                ui = self._repo.ui
                ui.warn(_("subrepo '%s' is hidden in revision %s\n") %
                        (self._relpath, node.short(self._ctx.node())))
            return 0
        except error.RepoLookupError:
            # A missing subrepo revision may be a case of needing to pull it, so
            # don't treat this as an error.
            self._repo.ui.warn(_("subrepo '%s' not found in revision %s\n") %
                               (self._relpath, node.short(self._ctx.node())))
            return 0
1153 1218
1154 1219 @propertycache
1155 1220 def wvfs(self):
1156 1221 """return own wvfs for efficiency and consistency
1157 1222 """
1158 1223 return self._repo.wvfs
1159 1224
1160 1225 @propertycache
1161 1226 def _relpath(self):
1162 1227 """return path to this subrepository as seen from outermost repository
1163 1228 """
1164 1229 # Keep consistent dir separators by avoiding vfs.join(self._path)
1165 1230 return reporelpath(self._repo)
1166 1231
class svnsubrepo(abstractsubrepo):
    """Subrepository backed by a Subversion working copy.

    Every operation shells out to the ``svn`` command-line client and
    parses its output (``--xml`` where available).
    """
    def __init__(self, ctx, path, state, allowcreate):
        super(svnsubrepo, self).__init__(ctx, path)
        self._state = state
        # resolve the client binary once; every later command depends on it
        self._exe = util.findexe('svn')
        if not self._exe:
            raise error.Abort(_("'svn' executable not found for subrepo '%s'")
                              % self._path)

    def _svncommand(self, commands, filename='', failok=False):
        """Run svn with ``commands`` and return ``(stdout, stderr)``.

        ``filename`` is joined to the subrepo path and appended as the
        target argument; pass None to run with no target (e.g. for
        ``--version``).  Unless ``failok`` is set, a non-zero exit status
        aborts and any stderr output is echoed as a warning.
        """
        cmd = [self._exe]
        extrakw = {}
        if not self.ui.interactive():
            # Making stdin be a pipe should prevent svn from behaving
            # interactively even if we can't pass --non-interactive.
            extrakw['stdin'] = subprocess.PIPE
            # Starting in svn 1.5 --non-interactive is a global flag
            # instead of being per-command, but we need to support 1.4 so
            # we have to be intelligent about what commands take
            # --non-interactive.
            if commands[0] in ('update', 'checkout', 'commit'):
                cmd.append('--non-interactive')
        cmd.extend(commands)
        if filename is not None:
            path = self.wvfs.reljoin(self._ctx.repo().origroot,
                                     self._path, filename)
            cmd.append(path)
        env = dict(encoding.environ)
        # Avoid localized output, preserve current locale for everything else.
        lc_all = env.get('LC_ALL')
        if lc_all:
            env['LANG'] = lc_all
            del env['LC_ALL']
        env['LC_MESSAGES'] = 'C'
        p = subprocess.Popen(cmd, bufsize=-1, close_fds=util.closefds,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             universal_newlines=True, env=env, **extrakw)
        stdout, stderr = p.communicate()
        stderr = stderr.strip()
        if not failok:
            if p.returncode:
                raise error.Abort(stderr or 'exited with code %d'
                                  % p.returncode)
            if stderr:
                self.ui.warn(stderr + '\n')
        return stdout, stderr

    @propertycache
    def _svnversion(self):
        """The svn client version as a (major, minor) tuple."""
        output, err = self._svncommand(['--version', '--quiet'], filename=None)
        m = re.search(br'^(\d+)\.(\d+)', output)
        if not m:
            raise error.Abort(_('cannot retrieve svn tool version'))
        return (int(m.group(1)), int(m.group(2)))

    def _wcrevs(self):
        """Return ``(lastrev, rev)`` for the working copy, as strings."""
        # Get the working directory revision as well as the last
        # commit revision so we can compare the subrepo state with
        # both. We used to store the working directory one.
        output, err = self._svncommand(['info', '--xml'])
        doc = xml.dom.minidom.parseString(output)
        entries = doc.getElementsByTagName('entry')
        lastrev, rev = '0', '0'
        if entries:
            rev = str(entries[0].getAttribute('revision')) or '0'
            commits = entries[0].getElementsByTagName('commit')
            if commits:
                lastrev = str(commits[0].getAttribute('revision')) or '0'
        return (lastrev, rev)

    def _wcrev(self):
        """The last-committed revision of the working copy."""
        return self._wcrevs()[0]

    def _wcchanged(self):
        """Return (changes, extchanges, missing) where changes is True
        if the working directory was changed, extchanges is
        True if any of these changes concern an external entry and missing
        is True if any change is a missing entry.
        """
        output, err = self._svncommand(['status', '--xml'])
        externals, changes, missing = [], [], []
        doc = xml.dom.minidom.parseString(output)
        for e in doc.getElementsByTagName('entry'):
            s = e.getElementsByTagName('wc-status')
            if not s:
                continue
            item = s[0].getAttribute('item')
            props = s[0].getAttribute('props')
            path = e.getAttribute('path')
            if item == 'external':
                externals.append(path)
            elif item == 'missing':
                missing.append(path)
            if (item not in ('', 'normal', 'unversioned', 'external')
                or props not in ('', 'none', 'normal')):
                changes.append(path)
        for path in changes:
            # a change inside an external entry counts as an external change
            for ext in externals:
                if path == ext or path.startswith(ext + pycompat.ossep):
                    return True, True, bool(missing)
        return bool(changes), False, bool(missing)

    def dirty(self, ignoreupdate=False, missing=False):
        """True if the working copy differs from the recorded state."""
        wcchanged = self._wcchanged()
        changed = wcchanged[0] or (missing and wcchanged[2])
        if not changed:
            if self._state[1] in self._wcrevs() or ignoreupdate:
                return False
        return True

    def basestate(self):
        """Return the revision to record for this working copy."""
        lastrev, rev = self._wcrevs()
        if lastrev != rev:
            # Last committed rev is not the same than rev. We would
            # like to take lastrev but we do not know if the subrepo
            # URL exists at lastrev. Test it and fallback to rev it
            # is not there.
            try:
                self._svncommand(['list', '%s@%s' % (self._state[0], lastrev)])
                return lastrev
            except error.Abort:
                pass
        return rev

    @annotatesubrepoerror
    def commit(self, text, user, date):
        """Commit working copy changes; returns the new revision number."""
        # user and date are out of our hands since svn is centralized
        changed, extchanged, missing = self._wcchanged()
        if not changed:
            return self.basestate()
        if extchanged:
            # Do not try to commit externals
            raise error.Abort(_('cannot commit svn externals'))
        if missing:
            # svn can commit with missing entries but aborting like hg
            # seems a better approach.
            raise error.Abort(_('cannot commit missing svn entries'))
        commitinfo, err = self._svncommand(['commit', '-m', text])
        self.ui.status(commitinfo)
        newrev = re.search('Committed revision ([0-9]+).', commitinfo)
        if not newrev:
            if not commitinfo.strip():
                # Sometimes, our definition of "changed" differs from
                # svn one. For instance, svn ignores missing files
                # when committing. If there are only missing files, no
                # commit is made, no output and no error code.
                raise error.Abort(_('failed to commit svn changes'))
            raise error.Abort(commitinfo.splitlines()[-1])
        newrev = newrev.groups()[0]
        self.ui.status(self._svncommand(['update', '-r', newrev])[0])
        return newrev

    @annotatesubrepoerror
    def remove(self):
        """Delete the working copy, refusing if it has local changes."""
        if self.dirty():
            self.ui.warn(_('not removing repo %s because '
                           'it has changes.\n') % self._path)
            return
        self.ui.note(_('removing subrepo %s\n') % self._path)

        self.wvfs.rmtree(forcibly=True)
        try:
            # also remove any now-empty parent directories
            pwvfs = self._ctx.repo().wvfs
            pwvfs.removedirs(pwvfs.dirname(self._path))
        except OSError:
            pass

    @annotatesubrepoerror
    def get(self, state, overwrite=False):
        """Check out ``state`` (URL and revision); with ``overwrite`` any
        local modifications are reverted first."""
        if overwrite:
            self._svncommand(['revert', '--recursive'])
        args = ['checkout']
        if self._svnversion >= (1, 5):
            args.append('--force')
        # The revision must be specified at the end of the URL to properly
        # update to a directory which has since been deleted and recreated.
        args.append('%s@%s' % (state[0], state[1]))

        # SEC: check that the ssh url is safe
        util.checksafessh(state[0])

        status, err = self._svncommand(args, failok=True)
        _sanitize(self.ui, self.wvfs, '.svn')
        if not re.search('Checked out revision [0-9]+.', status):
            if ('is already a working copy for a different URL' in err
                and (self._wcchanged()[:2] == (False, False))):
                # obstructed but clean working copy, so just blow it away.
                self.remove()
                self.get(state, overwrite=False)
                return
            raise error.Abort((status or err).splitlines()[-1])
        self.ui.status(status)

    @annotatesubrepoerror
    def merge(self, state):
        """Approximate a merge by updating to ``state``, prompting first
        when the working copy has been modified."""
        old = self._state[1]
        new = state[1]
        wcrev = self._wcrev()
        if new != wcrev:
            dirty = old == wcrev or self._wcchanged()[0]
            if _updateprompt(self.ui, self, dirty, wcrev, new):
                self.get(state, False)

    def push(self, opts):
        # push is a no-op for SVN
        return True

    @annotatesubrepoerror
    def files(self):
        """List versioned files (directories excluded) in the checkout."""
        output = self._svncommand(['list', '--recursive', '--xml'])[0]
        doc = xml.dom.minidom.parseString(output)
        paths = []
        for e in doc.getElementsByTagName('entry'):
            kind = str(e.getAttribute('kind'))
            if kind != 'file':
                continue
            name = ''.join(c.data for c
                           in e.getElementsByTagName('name')[0].childNodes
                           if c.nodeType == c.TEXT_NODE)
            paths.append(name.encode('utf-8'))
        return paths

    def filedata(self, name, decode):
        """Return the content of ``name`` via ``svn cat``.

        ``decode`` is accepted for interface parity but is not used here.
        """
        return self._svncommand(['cat'], name)[0]
1391 1456
1392 1457
1393 1458 class gitsubrepo(abstractsubrepo):
1394 1459 def __init__(self, ctx, path, state, allowcreate):
1395 1460 super(gitsubrepo, self).__init__(ctx, path)
1396 1461 self._state = state
1397 1462 self._abspath = ctx.repo().wjoin(path)
1398 1463 self._subparent = ctx.repo()
1399 1464 self._ensuregit()
1400 1465
    def _ensuregit(self):
        """Locate a working git executable and check its version.

        Tries ``git`` first and, on Windows only, falls back to
        ``git.cmd`` when ``git`` is absent.  Aborts if no executable is
        found or the version is older than what subrepos can use; warns
        when the version cannot be determined or is merely old.
        """
        try:
            self._gitexecutable = 'git'
            out, err = self._gitnodir(['--version'])
        except OSError as e:
            genericerror = _("error executing git for subrepo '%s': %s")
            notfoundhint = _("check git is installed and in your PATH")
            if e.errno != errno.ENOENT:
                # some failure other than "not found": report it as-is
                raise error.Abort(genericerror % (
                    self._path, encoding.strtolocal(e.strerror)))
            elif pycompat.iswindows:
                try:
                    self._gitexecutable = 'git.cmd'
                    out, err = self._gitnodir(['--version'])
                except OSError as e2:
                    if e2.errno == errno.ENOENT:
                        raise error.Abort(_("couldn't find 'git' or 'git.cmd'"
                                            " for subrepo '%s'") % self._path,
                                          hint=notfoundhint)
                    else:
                        raise error.Abort(genericerror % (self._path,
                                          encoding.strtolocal(e2.strerror)))
            else:
                raise error.Abort(_("couldn't find git for subrepo '%s'")
                                  % self._path, hint=notfoundhint)
        versionstatus = self._checkversion(out)
        if versionstatus == 'unknown':
            self.ui.warn(_('cannot retrieve git version\n'))
        elif versionstatus == 'abort':
            raise error.Abort(_('git subrepo requires at least 1.6.0 or later'))
        elif versionstatus == 'warning':
            self.ui.warn(_('git subrepo requires at least 1.6.0 or later\n'))
1433 1498
1434 1499 @staticmethod
1435 1500 def _gitversion(out):
1436 1501 m = re.search(br'^git version (\d+)\.(\d+)\.(\d+)', out)
1437 1502 if m:
1438 1503 return (int(m.group(1)), int(m.group(2)), int(m.group(3)))
1439 1504
1440 1505 m = re.search(br'^git version (\d+)\.(\d+)', out)
1441 1506 if m:
1442 1507 return (int(m.group(1)), int(m.group(2)), 0)
1443 1508
1444 1509 return -1
1445 1510
1446 1511 @staticmethod
1447 1512 def _checkversion(out):
1448 1513 '''ensure git version is new enough
1449 1514
1450 1515 >>> _checkversion = gitsubrepo._checkversion
1451 1516 >>> _checkversion(b'git version 1.6.0')
1452 1517 'ok'
1453 1518 >>> _checkversion(b'git version 1.8.5')
1454 1519 'ok'
1455 1520 >>> _checkversion(b'git version 1.4.0')
1456 1521 'abort'
1457 1522 >>> _checkversion(b'git version 1.5.0')
1458 1523 'warning'
1459 1524 >>> _checkversion(b'git version 1.9-rc0')
1460 1525 'ok'
1461 1526 >>> _checkversion(b'git version 1.9.0.265.g81cdec2')
1462 1527 'ok'
1463 1528 >>> _checkversion(b'git version 1.9.0.GIT')
1464 1529 'ok'
1465 1530 >>> _checkversion(b'git version 12345')
1466 1531 'unknown'
1467 1532 >>> _checkversion(b'no')
1468 1533 'unknown'
1469 1534 '''
1470 1535 version = gitsubrepo._gitversion(out)
1471 1536 # git 1.4.0 can't work at all, but 1.5.X can in at least some cases,
1472 1537 # despite the docstring comment. For now, error on 1.4.0, warn on
1473 1538 # 1.5.0 but attempt to continue.
1474 1539 if version == -1:
1475 1540 return 'unknown'
1476 1541 if version < (1, 5, 0):
1477 1542 return 'abort'
1478 1543 elif version < (1, 6, 0):
1479 1544 return 'warning'
1480 1545 return 'ok'
1481 1546
1482 1547 def _gitcommand(self, commands, env=None, stream=False):
1483 1548 return self._gitdir(commands, env=env, stream=stream)[0]
1484 1549
1485 1550 def _gitdir(self, commands, env=None, stream=False):
1486 1551 return self._gitnodir(commands, env=env, stream=stream,
1487 1552 cwd=self._abspath)
1488 1553
1489 1554 def _gitnodir(self, commands, env=None, stream=False, cwd=None):
1490 1555 """Calls the git command
1491 1556
1492 1557 The methods tries to call the git command. versions prior to 1.6.0
1493 1558 are not supported and very probably fail.
1494 1559 """
1495 1560 self.ui.debug('%s: git %s\n' % (self._relpath, ' '.join(commands)))
1496 1561 if env is None:
1497 1562 env = encoding.environ.copy()
1498 1563 # disable localization for Git output (issue5176)
1499 1564 env['LC_ALL'] = 'C'
1500 1565 # fix for Git CVE-2015-7545
1501 1566 if 'GIT_ALLOW_PROTOCOL' not in env:
1502 1567 env['GIT_ALLOW_PROTOCOL'] = 'file:git:http:https:ssh'
1503 1568 # unless ui.quiet is set, print git's stderr,
1504 1569 # which is mostly progress and useful info
1505 1570 errpipe = None
1506 1571 if self.ui.quiet:
1507 1572 errpipe = open(os.devnull, 'w')
1508 1573 if self.ui._colormode and len(commands) and commands[0] == "diff":
1509 1574 # insert the argument in the front,
1510 1575 # the end of git diff arguments is used for paths
1511 1576 commands.insert(1, '--color')
1512 1577 p = subprocess.Popen([self._gitexecutable] + commands, bufsize=-1,
1513 1578 cwd=cwd, env=env, close_fds=util.closefds,
1514 1579 stdout=subprocess.PIPE, stderr=errpipe)
1515 1580 if stream:
1516 1581 return p.stdout, None
1517 1582
1518 1583 retdata = p.stdout.read().strip()
1519 1584 # wait for the child to exit to avoid race condition.
1520 1585 p.wait()
1521 1586
1522 1587 if p.returncode != 0 and p.returncode != 1:
1523 1588 # there are certain error codes that are ok
1524 1589 command = commands[0]
1525 1590 if command in ('cat-file', 'symbolic-ref'):
1526 1591 return retdata, p.returncode
1527 1592 # for all others, abort
1528 1593 raise error.Abort(_('git %s error %d in %s') %
1529 1594 (command, p.returncode, self._relpath))
1530 1595
1531 1596 return retdata, p.returncode
1532 1597
1533 1598 def _gitmissing(self):
1534 1599 return not self.wvfs.exists('.git')
1535 1600
1536 1601 def _gitstate(self):
1537 1602 return self._gitcommand(['rev-parse', 'HEAD'])
1538 1603
1539 1604 def _gitcurrentbranch(self):
1540 1605 current, err = self._gitdir(['symbolic-ref', 'HEAD', '--quiet'])
1541 1606 if err:
1542 1607 current = None
1543 1608 return current
1544 1609
1545 1610 def _gitremote(self, remote):
1546 1611 out = self._gitcommand(['remote', 'show', '-n', remote])
1547 1612 line = out.split('\n')[1]
1548 1613 i = line.index('URL: ') + len('URL: ')
1549 1614 return line[i:]
1550 1615
1551 1616 def _githavelocally(self, revision):
1552 1617 out, code = self._gitdir(['cat-file', '-e', revision])
1553 1618 return code == 0
1554 1619
1555 1620 def _gitisancestor(self, r1, r2):
1556 1621 base = self._gitcommand(['merge-base', r1, r2])
1557 1622 return base == r1
1558 1623
1559 1624 def _gitisbare(self):
1560 1625 return self._gitcommand(['config', '--bool', 'core.bare']) == 'true'
1561 1626
1562 1627 def _gitupdatestat(self):
1563 1628 """This must be run before git diff-index.
1564 1629 diff-index only looks at changes to file stat;
1565 1630 this command looks at file contents and updates the stat."""
1566 1631 self._gitcommand(['update-index', '-q', '--refresh'])
1567 1632
1568 1633 def _gitbranchmap(self):
1569 1634 '''returns 2 things:
1570 1635 a map from git branch to revision
1571 1636 a map from revision to branches'''
1572 1637 branch2rev = {}
1573 1638 rev2branch = {}
1574 1639
1575 1640 out = self._gitcommand(['for-each-ref', '--format',
1576 1641 '%(objectname) %(refname)'])
1577 1642 for line in out.split('\n'):
1578 1643 revision, ref = line.split(' ')
1579 1644 if (not ref.startswith('refs/heads/') and
1580 1645 not ref.startswith('refs/remotes/')):
1581 1646 continue
1582 1647 if ref.startswith('refs/remotes/') and ref.endswith('/HEAD'):
1583 1648 continue # ignore remote/HEAD redirects
1584 1649 branch2rev[ref] = revision
1585 1650 rev2branch.setdefault(revision, []).append(ref)
1586 1651 return branch2rev, rev2branch
1587 1652
1588 1653 def _gittracking(self, branches):
1589 1654 'return map of remote branch to local tracking branch'
1590 1655 # assumes no more than one local tracking branch for each remote
1591 1656 tracking = {}
1592 1657 for b in branches:
1593 1658 if b.startswith('refs/remotes/'):
1594 1659 continue
1595 1660 bname = b.split('/', 2)[2]
1596 1661 remote = self._gitcommand(['config', 'branch.%s.remote' % bname])
1597 1662 if remote:
1598 1663 ref = self._gitcommand(['config', 'branch.%s.merge' % bname])
1599 1664 tracking['refs/remotes/%s/%s' %
1600 1665 (remote, ref.split('/', 2)[2])] = b
1601 1666 return tracking
1602 1667
    def _abssource(self, source):
        """Resolve ``source`` to an absolute source location.

        URLs and scp-style ``host:path`` sources are already absolute and
        returned unchanged; anything else is recorded on ``self._subsource``
        and resolved by the module-level ``_abssource`` helper (which
        presumably reads that attribute -- confirm against the helper).
        """
        if '://' not in source:
            # recognize the scp syntax as an absolute source
            colon = source.find(':')
            if colon != -1 and '/' not in source[:colon]:
                return source
        self._subsource = source
        return _abssource(self)
1611 1676
    def _fetch(self, source, revision):
        """Make ``revision`` available locally, cloning from ``source`` if
        no checkout exists yet, then fetching from origin as a fallback.

        Aborts when the revision still cannot be found locally.
        """
        if self._gitmissing():
            # SEC: check for safe ssh url
            util.checksafessh(source)

            source = self._abssource(source)
            self.ui.status(_('cloning subrepo %s from %s\n') %
                           (self._relpath, source))
            self._gitnodir(['clone', source, self._abspath])
        if self._githavelocally(revision):
            return
        self.ui.status(_('pulling subrepo %s from %s\n') %
                       (self._relpath, self._gitremote('origin')))
        # try only origin: the originally cloned repo
        self._gitcommand(['fetch'])
        if not self._githavelocally(revision):
            raise error.Abort(_('revision %s does not exist in subrepository '
                                '"%s"\n') % (revision, self._relpath))
1630 1695
1631 1696 @annotatesubrepoerror
1632 1697 def dirty(self, ignoreupdate=False, missing=False):
1633 1698 if self._gitmissing():
1634 1699 return self._state[1] != ''
1635 1700 if self._gitisbare():
1636 1701 return True
1637 1702 if not ignoreupdate and self._state[1] != self._gitstate():
1638 1703 # different version checked out
1639 1704 return True
1640 1705 # check for staged changes or modified files; ignore untracked files
1641 1706 self._gitupdatestat()
1642 1707 out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
1643 1708 return code == 1
1644 1709
1645 1710 def basestate(self):
1646 1711 return self._gitstate()
1647 1712
    @annotatesubrepoerror
    def get(self, state, overwrite=False):
        """Update the checkout to ``state`` = (source, revision, kind).

        Fetches the revision if needed, then checks it out, preferring (in
        order) the master branch, a local branch, and a tracked remote
        branch, falling back to a detached HEAD.  With ``overwrite``,
        local changes and the index are discarded first.  An empty
        revision removes the subrepo.
        """
        source, revision, kind = state
        if not revision:
            self.remove()
            return
        self._fetch(source, revision)
        # if the repo was set to be bare, unbare it
        if self._gitisbare():
            self._gitcommand(['config', 'core.bare', 'false'])
            if self._gitstate() == revision:
                self._gitcommand(['reset', '--hard', 'HEAD'])
                return
        elif self._gitstate() == revision:
            if overwrite:
                # first reset the index to unmark new files for commit, because
                # reset --hard will otherwise throw away files added for commit,
                # not just unmark them.
                self._gitcommand(['reset', 'HEAD'])
            self._gitcommand(['reset', '--hard', 'HEAD'])
            return
        branch2rev, rev2branch = self._gitbranchmap()

        def checkout(args):
            # run `git checkout` (with -f when overwriting) and scrub
            # potentially hostile metadata from the result
            cmd = ['checkout']
            if overwrite:
                # first reset the index to unmark new files for commit, because
                # the -f option will otherwise throw away files added for
                # commit, not just unmark them.
                self._gitcommand(['reset', 'HEAD'])
                cmd.append('-f')
            self._gitcommand(cmd + args)
            _sanitize(self.ui, self.wvfs, '.git')

        def rawcheckout():
            # no branch to checkout, check it out with no branch
            self.ui.warn(_('checking out detached HEAD in '
                           'subrepository "%s"\n') % self._relpath)
            self.ui.warn(_('check out a git branch if you intend '
                           'to make changes\n'))
            checkout(['-q', revision])

        if revision not in rev2branch:
            rawcheckout()
            return
        branches = rev2branch[revision]
        firstlocalbranch = None
        for b in branches:
            if b == 'refs/heads/master':
                # master trumps all other branches
                checkout(['refs/heads/master'])
                return
            if not firstlocalbranch and not b.startswith('refs/remotes/'):
                firstlocalbranch = b
        if firstlocalbranch:
            checkout([firstlocalbranch])
            return

        tracking = self._gittracking(branch2rev.keys())
        # choose a remote branch already tracked if possible
        remote = branches[0]
        if remote not in tracking:
            for b in branches:
                if b in tracking:
                    remote = b
                    break

        if remote not in tracking:
            # create a new local tracking branch
            local = remote.split('/', 3)[3]
            checkout(['-b', local, remote])
        elif self._gitisancestor(branch2rev[tracking[remote]], remote):
            # When updating to a tracked remote branch,
            # if the local tracking branch is downstream of it,
            # a normal `git pull` would have performed a "fast-forward merge"
            # which is equivalent to updating the local branch to the remote.
            # Since we are only looking at branching at update, we need to
            # detect this situation and perform this action lazily.
            if tracking[remote] != self._gitcurrentbranch():
                checkout([tracking[remote]])
            self._gitcommand(['merge', '--ff', remote])
            _sanitize(self.ui, self.wvfs, '.git')
        else:
            # a real merge would be required, just checkout the revision
            rawcheckout()
1733 1798
1734 1799 @annotatesubrepoerror
1735 1800 def commit(self, text, user, date):
1736 1801 if self._gitmissing():
1737 1802 raise error.Abort(_("subrepo %s is missing") % self._relpath)
1738 1803 cmd = ['commit', '-a', '-m', text]
1739 1804 env = encoding.environ.copy()
1740 1805 if user:
1741 1806 cmd += ['--author', user]
1742 1807 if date:
1743 1808 # git's date parser silently ignores when seconds < 1e9
1744 1809 # convert to ISO8601
1745 1810 env['GIT_AUTHOR_DATE'] = util.datestr(date,
1746 1811 '%Y-%m-%dT%H:%M:%S %1%2')
1747 1812 self._gitcommand(cmd, env=env)
1748 1813 # make sure commit works otherwise HEAD might not exist under certain
1749 1814 # circumstances
1750 1815 return self._gitstate()
1751 1816
    @annotatesubrepoerror
    def merge(self, state):
        """Merge the checkout with the revision in ``state``.

        Fast-forwards via get() when possible, otherwise runs
        ``git merge --no-commit``; prompts first when the checkout is
        dirty and not already at the target revision.
        """
        source, revision, kind = state
        self._fetch(source, revision)
        base = self._gitcommand(['merge-base', revision, self._state[1]])
        # diff-index needs a refreshed stat cache to be reliable
        self._gitupdatestat()
        out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])

        def mergefunc():
            if base == revision:
                self.get(state) # fast forward merge
            elif base != self._state[1]:
                self._gitcommand(['merge', '--no-commit', revision])
                _sanitize(self.ui, self.wvfs, '.git')

        if self.dirty():
            if self._gitstate() != revision:
                # dirty if the checkout moved off the recorded state or
                # diff-index reported modifications (exit code != 0)
                dirty = self._gitstate() == self._state[1] or code != 0
                if _updateprompt(self.ui, self, dirty,
                                 self._state[1][:7], revision[:7]):
                    mergefunc()
        else:
            mergefunc()
1775 1840
    @annotatesubrepoerror
    def push(self, opts):
        """Push the recorded revision to origin.

        Returns True when nothing needs pushing or the push succeeded,
        False when it failed or no suitable branch is checked out.
        """
        force = opts.get('force')

        if not self._state[1]:
            # nothing recorded yet, so nothing to push
            return True
        if self._gitmissing():
            raise error.Abort(_("subrepo %s is missing") % self._relpath)
        # if a branch in origin contains the revision, nothing to do
        branch2rev, rev2branch = self._gitbranchmap()
        if self._state[1] in rev2branch:
            for b in rev2branch[self._state[1]]:
                if b.startswith('refs/remotes/origin/'):
                    return True
        for b, revision in branch2rev.iteritems():
            if b.startswith('refs/remotes/origin/'):
                if self._gitisancestor(self._state[1], revision):
                    return True
        # otherwise, try to push the currently checked out branch
        cmd = ['push']
        if force:
            cmd.append('--force')

        current = self._gitcurrentbranch()
        if current:
            # determine if the current branch is even useful
            if not self._gitisancestor(self._state[1], current):
                self.ui.warn(_('unrelated git branch checked out '
                               'in subrepository "%s"\n') % self._relpath)
                return False
            self.ui.status(_('pushing branch %s of subrepository "%s"\n') %
                           (current.split('/', 2)[2], self._relpath))
            ret = self._gitdir(cmd + ['origin', current])
            return ret[1] == 0
        else:
            # detached HEAD: there is no branch to push
            self.ui.warn(_('no branch checked out in subrepository "%s"\n'
                           'cannot push revision %s\n') %
                         (self._relpath, self._state[1]))
            return False
1815 1880
1816 1881 @annotatesubrepoerror
1817 1882 def add(self, ui, match, prefix, explicitonly, **opts):
1818 1883 if self._gitmissing():
1819 1884 return []
1820 1885
1821 1886 (modified, added, removed,
1822 1887 deleted, unknown, ignored, clean) = self.status(None, unknown=True,
1823 1888 clean=True)
1824 1889
1825 1890 tracked = set()
1826 1891 # dirstates 'amn' warn, 'r' is added again
1827 1892 for l in (modified, added, deleted, clean):
1828 1893 tracked.update(l)
1829 1894
1830 1895 # Unknown files not of interest will be rejected by the matcher
1831 1896 files = unknown
1832 1897 files.extend(match.files())
1833 1898
1834 1899 rejected = []
1835 1900
1836 1901 files = [f for f in sorted(set(files)) if match(f)]
1837 1902 for f in files:
1838 1903 exact = match.exact(f)
1839 1904 command = ["add"]
1840 1905 if exact:
1841 1906 command.append("-f") #should be added, even if ignored
1842 1907 if ui.verbose or not exact:
1843 1908 ui.status(_('adding %s\n') % match.rel(f))
1844 1909
1845 1910 if f in tracked: # hg prints 'adding' even if already tracked
1846 1911 if exact:
1847 1912 rejected.append(f)
1848 1913 continue
1849 1914 if not opts.get(r'dry_run'):
1850 1915 self._gitcommand(command + [f])
1851 1916
1852 1917 for f in rejected:
1853 1918 ui.warn(_("%s already tracked!\n") % match.abs(f))
1854 1919
1855 1920 return rejected
1856 1921
1857 1922 @annotatesubrepoerror
1858 1923 def remove(self):
1859 1924 if self._gitmissing():
1860 1925 return
1861 1926 if self.dirty():
1862 1927 self.ui.warn(_('not removing repo %s because '
1863 1928 'it has changes.\n') % self._relpath)
1864 1929 return
1865 1930 # we can't fully delete the repository as it may contain
1866 1931 # local-only history
1867 1932 self.ui.note(_('removing subrepo %s\n') % self._relpath)
1868 1933 self._gitcommand(['config', 'core.bare', 'true'])
1869 1934 for f, kind in self.wvfs.readdir():
1870 1935 if f == '.git':
1871 1936 continue
1872 1937 if kind == stat.S_IFDIR:
1873 1938 self.wvfs.rmtree(f)
1874 1939 else:
1875 1940 self.wvfs.unlink(f)
1876 1941
1877 1942 def archive(self, archiver, prefix, match=None, decode=True):
1878 1943 total = 0
1879 1944 source, revision = self._state
1880 1945 if not revision:
1881 1946 return total
1882 1947 self._fetch(source, revision)
1883 1948
1884 1949 # Parse git's native archive command.
1885 1950 # This should be much faster than manually traversing the trees
1886 1951 # and objects with many subprocess calls.
1887 1952 tarstream = self._gitcommand(['archive', revision], stream=True)
1888 1953 tar = tarfile.open(fileobj=tarstream, mode='r|')
1889 1954 relpath = subrelpath(self)
1890 1955 self.ui.progress(_('archiving (%s)') % relpath, 0, unit=_('files'))
1891 1956 for i, info in enumerate(tar):
1892 1957 if info.isdir():
1893 1958 continue
1894 1959 if match and not match(info.name):
1895 1960 continue
1896 1961 if info.issym():
1897 1962 data = info.linkname
1898 1963 else:
1899 1964 data = tar.extractfile(info).read()
1900 1965 archiver.addfile(prefix + self._path + '/' + info.name,
1901 1966 info.mode, info.issym(), data)
1902 1967 total += 1
1903 1968 self.ui.progress(_('archiving (%s)') % relpath, i + 1,
1904 1969 unit=_('files'))
1905 1970 self.ui.progress(_('archiving (%s)') % relpath, None)
1906 1971 return total
1907 1972
1908 1973
1909 1974 @annotatesubrepoerror
1910 1975 def cat(self, match, fm, fntemplate, prefix, **opts):
1911 1976 rev = self._state[1]
1912 1977 if match.anypats():
1913 1978 return 1 #No support for include/exclude yet
1914 1979
1915 1980 if not match.files():
1916 1981 return 1
1917 1982
1918 1983 # TODO: add support for non-plain formatter (see cmdutil.cat())
1919 1984 for f in match.files():
1920 1985 output = self._gitcommand(["show", "%s:%s" % (rev, f)])
1921 1986 fp = cmdutil.makefileobj(self._subparent, fntemplate,
1922 1987 self._ctx.node(),
1923 1988 pathname=self.wvfs.reljoin(prefix, f))
1924 1989 fp.write(output)
1925 1990 fp.close()
1926 1991 return 0
1927 1992
1928 1993
    @annotatesubrepoerror
    def status(self, rev2, **opts):
        """Return a scmutil.status comparing the recorded revision with
        ``rev2`` (or with the working directory when ``rev2`` is None).

        Keyword opts 'unknown', 'ignored' and 'clean' enable the
        corresponding extra buckets of the status result.
        """
        rev1 = self._state[1]
        if self._gitmissing() or not rev1:
            # if the repo is missing, return no results
            return scmutil.status([], [], [], [], [], [], [])
        modified, added, removed = [], [], []
        self._gitupdatestat()
        if rev2:
            command = ['diff-tree', '--no-renames', '-r', rev1, rev2]
        else:
            command = ['diff-index', '--no-renames', rev1]
        out = self._gitcommand(command)
        for line in out.split('\n'):
            tab = line.find('\t')
            if tab == -1:
                continue
            # the one-letter status code sits immediately before the tab
            status, f = line[tab - 1], line[tab + 1:]
            if status == 'M':
                modified.append(f)
            elif status == 'A':
                added.append(f)
            elif status == 'D':
                removed.append(f)

        deleted, unknown, ignored, clean = [], [], [], []

        # -z gives NUL-terminated entries, avoiding quoting of odd filenames
        command = ['status', '--porcelain', '-z']
        if opts.get(r'unknown'):
            command += ['--untracked-files=all']
        if opts.get(r'ignored'):
            command += ['--ignored']
        out = self._gitcommand(command)

        changedfiles = set()
        changedfiles.update(modified)
        changedfiles.update(added)
        changedfiles.update(removed)
        for line in out.split('\0'):
            if not line:
                continue
            st = line[0:2]
            #moves and copies show 2 files on one line
            # NOTE(review): after split('\0') an entry can never contain
            # another NUL, so this branch looks unreachable; the second
            # filename of a rename would arrive as its own entry and be
            # parsed as a (garbled) status line -- confirm against git
            # status --porcelain -z output
            if line.find('\0') >= 0:
                filename1, filename2 = line[3:].split('\0')
            else:
                filename1 = line[3:]
                filename2 = None

            changedfiles.add(filename1)
            if filename2:
                changedfiles.add(filename2)

            if st == '??':
                unknown.append(filename1)
            elif st == '!!':
                ignored.append(filename1)

        if opts.get(r'clean'):
            # clean = everything tracked that did not show up as changed
            out = self._gitcommand(['ls-files'])
            for f in out.split('\n'):
                if not f in changedfiles:
                    clean.append(f)

        return scmutil.status(modified, added, removed, deleted,
                              unknown, ignored, clean)
1995 2060
1996 2061 @annotatesubrepoerror
1997 2062 def diff(self, ui, diffopts, node2, match, prefix, **opts):
1998 2063 node1 = self._state[1]
1999 2064 cmd = ['diff', '--no-renames']
2000 2065 if opts[r'stat']:
2001 2066 cmd.append('--stat')
2002 2067 else:
2003 2068 # for Git, this also implies '-p'
2004 2069 cmd.append('-U%d' % diffopts.context)
2005 2070
2006 2071 gitprefix = self.wvfs.reljoin(prefix, self._path)
2007 2072
2008 2073 if diffopts.noprefix:
2009 2074 cmd.extend(['--src-prefix=%s/' % gitprefix,
2010 2075 '--dst-prefix=%s/' % gitprefix])
2011 2076 else:
2012 2077 cmd.extend(['--src-prefix=a/%s/' % gitprefix,
2013 2078 '--dst-prefix=b/%s/' % gitprefix])
2014 2079
2015 2080 if diffopts.ignorews:
2016 2081 cmd.append('--ignore-all-space')
2017 2082 if diffopts.ignorewsamount:
2018 2083 cmd.append('--ignore-space-change')
2019 2084 if self._gitversion(self._gitcommand(['--version'])) >= (1, 8, 4) \
2020 2085 and diffopts.ignoreblanklines:
2021 2086 cmd.append('--ignore-blank-lines')
2022 2087
2023 2088 cmd.append(node1)
2024 2089 if node2:
2025 2090 cmd.append(node2)
2026 2091
2027 2092 output = ""
2028 2093 if match.always():
2029 2094 output += self._gitcommand(cmd) + '\n'
2030 2095 else:
2031 2096 st = self.status(node2)[:3]
2032 2097 files = [f for sublist in st for f in sublist]
2033 2098 for f in files:
2034 2099 if match(f):
2035 2100 output += self._gitcommand(cmd + ['--', f]) + '\n'
2036 2101
2037 2102 if output.strip():
2038 2103 ui.write(output)
2039 2104
2040 2105 @annotatesubrepoerror
2041 2106 def revert(self, substate, *pats, **opts):
2042 2107 self.ui.status(_('reverting subrepo %s\n') % substate[0])
2043 2108 if not opts.get(r'no_backup'):
2044 2109 status = self.status(None)
2045 2110 names = status.modified
2046 2111 for name in names:
2047 2112 bakname = scmutil.origpath(self.ui, self._subparent, name)
2048 2113 self.ui.note(_('saving current version of %s as %s\n') %
2049 2114 (name, bakname))
2050 2115 self.wvfs.rename(name, bakname)
2051 2116
2052 2117 if not opts.get(r'dry_run'):
2053 2118 self.get(substate, overwrite=True)
2054 2119 return []
2055 2120
2056 2121 def shortid(self, revid):
2057 2122 return revid[:7]
2058 2123
# map of subrepo kind keyword (as used in .hgsub entries) to the
# subrepo class implementing it
types = {
    'hg': hgsubrepo,
    'svn': svnsubrepo,
    'git': gitsubrepo,
}
General Comments 0
You need to be logged in to leave comments. Login now